From 8655e839d78eba427711f230665a6d02f38c6fe1 Mon Sep 17 00:00:00 2001 From: MasterPtato Date: Tue, 2 Jul 2024 00:23:45 +0000 Subject: [PATCH] feat(clusters): convert clusters to new workflow system --- docs/libraries/workflow/DESIGN.md | 5 + docs/libraries/workflow/GLOSSARY.md | 36 +- docs/libraries/workflow/GOTCHAS.md | 15 + docs/libraries/workflow/SIGNALS.md | 7 - .../workflow/SIGNALS_AND_MESSAGES.md | 29 ++ lib/bolt/core/src/tasks/gen.rs | 6 - lib/chirp-workflow/core/src/ctx/activity.rs | 4 + lib/chirp-workflow/core/src/ctx/operation.rs | 1 + lib/chirp-workflow/core/src/ctx/workflow.rs | 29 +- lib/chirp-workflow/core/src/executable.rs | 6 +- lib/chirp-workflow/core/src/signal.rs | 4 +- lib/chirp-workflow/macros/src/lib.rs | 37 +- proto/backend/cluster.proto | 5 + svc/Cargo.lock | 374 ++----------- svc/Cargo.toml | 21 +- svc/api/admin/Cargo.toml | 10 +- svc/api/cloud/Cargo.toml | 3 +- svc/api/provision/Cargo.toml | 6 +- svc/api/traefik-provider/Cargo.toml | 2 +- svc/pkg/cluster/Cargo.toml | 40 ++ svc/pkg/cluster/{worker => }/Service.toml | 6 +- svc/pkg/cluster/{util => }/build.rs | 16 +- .../20240701225245_add_json.down.sql | 0 .../migrations/20240701225245_add_json.up.sql | 13 + svc/pkg/cluster/ops/datacenter-get/Cargo.toml | 19 - .../cluster/ops/datacenter-get/Service.toml | 10 - svc/pkg/cluster/ops/datacenter-get/src/lib.rs | 107 ---- .../cluster/ops/datacenter-list/Cargo.toml | 19 - .../cluster/ops/datacenter-list/Service.toml | 10 - .../cluster/ops/datacenter-list/src/lib.rs | 62 --- .../ops/datacenter-location-get/Cargo.toml | 20 - .../ops/datacenter-location-get/Service.toml | 7 - .../ops/datacenter-location-get/src/lib.rs | 100 ---- .../Service.toml | 10 - .../datacenter-resolve-for-name-id/src/lib.rs | 41 -- .../cluster/ops/datacenter-tls-get/Cargo.toml | 19 - .../ops/datacenter-tls-get/Service.toml | 10 - .../cluster/ops/datacenter-tls-get/src/lib.rs | 60 --- .../ops/datacenter-topology-get/Cargo.toml | 25 - .../ops/datacenter-topology-get/README.md | 3 - .../ops/datacenter-topology-get/Service.toml | 10 - svc/pkg/cluster/ops/get-for-game/Cargo.toml | 20 - svc/pkg/cluster/ops/get-for-game/Service.toml | 10 - svc/pkg/cluster/ops/get-for-game/src/lib.rs | 42 -- svc/pkg/cluster/ops/get/Cargo.toml | 19 - svc/pkg/cluster/ops/get/Service.toml | 10 - svc/pkg/cluster/ops/get/src/lib.rs | 53 -- svc/pkg/cluster/ops/list/Cargo.toml | 19 - svc/pkg/cluster/ops/list/Service.toml | 10 - svc/pkg/cluster/ops/list/src/lib.rs | 46 -- .../ops/resolve-for-name-id/Cargo.toml | 19 - .../ops/resolve-for-name-id/Service.toml | 10 - .../ops/resolve-for-name-id/src/lib.rs | 35 -- .../ops/server-destroy-with-filter/Cargo.toml | 19 - .../server-destroy-with-filter/Service.toml | 7 - svc/pkg/cluster/ops/server-get/Cargo.toml | 20 - svc/pkg/cluster/ops/server-get/Service.toml | 10 - svc/pkg/cluster/ops/server-get/src/lib.rs | 71 --- svc/pkg/cluster/ops/server-list/Cargo.toml | 19 - svc/pkg/cluster/ops/server-list/Service.toml | 10 - svc/pkg/cluster/ops/server-list/src/lib.rs | 123 ----- .../ops/server-resolve-for-ip/Cargo.toml | 19 - .../ops/server-resolve-for-ip/Service.toml | 10 - .../ops/server-resolve-for-ip/src/lib.rs | 42 -- svc/pkg/cluster/src/lib.rs | 15 + svc/pkg/cluster/src/ops/datacenter/get.rs | 129 +++++ svc/pkg/cluster/src/ops/datacenter/list.rs | 56 ++ .../src/ops/datacenter/location_get.rs | 121 +++++ svc/pkg/cluster/src/ops/datacenter/mod.rs | 6 + .../src/ops/datacenter/resolve_for_name_id.rs | 40 ++ svc/pkg/cluster/src/ops/datacenter/tls_get.rs | 85 +++ .../ops/datacenter/topology_get.rs} 
| 77 +-- svc/pkg/cluster/src/ops/get.rs | 31 ++ svc/pkg/cluster/src/ops/get_for_game.rs | 40 ++ svc/pkg/cluster/src/ops/list.rs | 24 + svc/pkg/cluster/src/ops/mod.rs | 6 + .../cluster/src/ops/resolve_for_name_id.rs | 36 ++ .../ops/server/destroy_with_filter.rs} | 44 +- svc/pkg/cluster/src/ops/server/get.rs | 73 +++ svc/pkg/cluster/src/ops/server/list.rs | 66 +++ svc/pkg/cluster/src/ops/server/mod.rs | 4 + .../cluster/src/ops/server/resolve_for_ip.rs | 42 ++ svc/pkg/cluster/src/types.rs | 183 +++++++ .../cluster/{util/src => src/util}/metrics.rs | 0 svc/pkg/cluster/src/util/mod.rs | 36 ++ .../cluster/{util/src => src/util}/test.rs | 0 svc/pkg/cluster/src/workflows/cluster.rs | 167 ++++++ .../cluster/src/workflows/datacenter/mod.rs | 277 ++++++++++ .../workflows/datacenter/scale.rs} | 367 +++++++------ .../workflows/datacenter/tls_issue.rs} | 271 ++++++---- svc/pkg/cluster/src/workflows/mod.rs | 3 + .../workflows/server/destroy.rs} | 1 - .../workflows/server/dns_create.rs} | 0 .../workflows/server/dns_delete.rs} | 0 .../workflows/server/drain.rs} | 0 .../install_scripts/components/mod.rs | 0 .../install_scripts/components/nomad.rs | 0 .../install_scripts/components/ok_server.rs | 0 .../install_scripts/components/rivet.rs | 0 .../install}/install_scripts/components/s3.rs | 0 .../install_scripts/components/traefik.rs | 0 .../components/traffic_server.rs | 0 .../install_scripts/components/vector.rs | 0 .../install_scripts/files/cni_plugins.sh | 0 .../install}/install_scripts/files/docker.sh | 0 .../install_scripts/files/node_exporter.sh | 0 .../install_scripts/files/nomad_configure.sh | 0 .../install_scripts/files/nomad_install.sh | 0 .../install_scripts/files/ok_server.sh | 0 .../files/rivet_create_hook.sh | 0 .../install_scripts/files/rivet_fetch_info.sh | 0 .../install_scripts/files/rivet_fetch_tls.sh | 0 .../install}/install_scripts/files/sysctl.sh | 0 .../install}/install_scripts/files/traefik.sh | 0 .../install_scripts/files/traefik_instance.sh | 0 .../files/traffic_server/etc/cache.config | 0 .../files/traffic_server/etc/hosting.config | 0 .../files/traffic_server/etc/ip_allow.yaml | 0 .../files/traffic_server/etc/logging.yaml | 0 .../files/traffic_server/etc/parent.config | 0 .../files/traffic_server/etc/plugin.config | 0 .../files/traffic_server/etc/records.config | 0 .../files/traffic_server/etc/sni.yaml | 0 .../files/traffic_server/etc/socks.config | 0 .../files/traffic_server/etc/splitdns.config | 0 .../traffic_server/etc/ssl_multicert.config | 0 .../files/traffic_server/etc/strategies.yaml | 0 .../traffic_server/etc/strip_headers.lua | 0 .../traffic_server/etc/trafficserver-release | 0 .../files/traffic_server/etc/volume.config | 0 .../files/traffic_server_configure.sh | 0 .../files/traffic_server_install.sh | 0 .../install_scripts/files/vector_configure.sh | 0 .../install_scripts/files/vector_install.sh | 0 .../server/install}/install_scripts/mod.rs | 0 .../workflows/server/install}/mod.rs | 0 .../workflows/server/install_complete.rs} | 2 + svc/pkg/cluster/src/workflows/server/mod.rs | 458 ++++++++++++++++ .../server}/nomad_node_drain_complete.rs | 2 + .../server}/nomad_node_registered.rs | 2 + .../workflows/server/taint.rs} | 0 .../workflows/server/undrain.rs} | 0 .../datacenter-tls-renew/Cargo.toml | 2 +- .../standalone/default-update/Cargo.toml | 5 +- svc/pkg/cluster/standalone/fix-tls/Cargo.toml | 9 +- svc/pkg/cluster/standalone/gc/Cargo.toml | 3 +- .../standalone/metrics-publish/Cargo.toml | 4 +- .../{worker/tests => testsTMP}/common.rs | 0 .../{worker/tests => 
testsTMP}/create.rs | 0 .../tests => testsTMP}/datacenter_create.rs | 0 .../datacenter_get.rs} | 0 .../datacenter_list.rs} | 0 .../datacenter_location_get.rs} | 0 .../datacenter_resolve_for_name_id.rs} | 0 .../tests => testsTMP}/datacenter_scale.rs | 0 .../datacenter_tls_get.rs} | 0 .../datacenter_tls_issue.rs | 0 .../datacenter_topology_get.rs} | 0 .../tests => testsTMP}/datacenter_update.rs | 0 .../{worker/tests => testsTMP}/game_link.rs | 0 .../tests/integration.rs => testsTMP/get.rs} | 0 .../get_for_game.rs} | 0 .../tests/integration.rs => testsTMP/list.rs} | 0 .../nomad_node_drain_complete.rs | 0 .../nomad_node_registered.rs | 0 .../resolve_for_name_id.rs} | 0 .../tests => testsTMP}/server_destroy.rs | 0 .../server_destroy_with_filter.rs} | 0 .../tests => testsTMP}/server_dns_create.rs | 0 .../tests => testsTMP}/server_dns_delete.rs | 0 .../tests => testsTMP}/server_drain.rs | 0 .../integration.rs => testsTMP/server_get.rs} | 0 .../tests => testsTMP}/server_install.rs | 0 .../server_install_complete.rs | 0 .../server_list.rs} | 0 .../tests => testsTMP}/server_provision.rs | 0 .../server_resolve_for_ip.rs} | 0 .../tests => testsTMP}/server_taint.rs | 0 .../tests => testsTMP}/server_undrain.rs | 0 svc/pkg/cluster/util/Cargo.toml | 18 - svc/pkg/cluster/worker/Cargo.toml | 4 +- svc/pkg/cluster/worker/src/lib.rs | 2 - svc/pkg/cluster/worker/src/util.rs | 6 - svc/pkg/cluster/worker/src/workers/create.rs | 33 -- .../worker/src/workers/datacenter_create.rs | 98 ---- .../worker/src/workers/datacenter_update.rs | 81 --- .../cluster/worker/src/workers/game_link.rs | 30 -- svc/pkg/cluster/worker/src/workers/mod.rs | 37 -- .../worker/src/workers/server_provision.rs | 314 ----------- .../Cargo.toml | 14 +- .../{ops/server-provision => }/Service.toml | 7 +- svc/pkg/linode/db/linode/Service.toml | 7 + .../migrations/20240705194302_init.down.sql | 0 .../migrations/20240705194302_init.up.sql | 28 + .../linode/ops/instance-type-get/Cargo.toml | 20 - .../linode/ops/instance-type-get/Service.toml | 10 - .../linode/ops/instance-type-get/src/lib.rs | 43 -- svc/pkg/linode/ops/server-destroy/Cargo.toml | 25 - .../linode/ops/server-destroy/Service.toml | 10 - svc/pkg/linode/ops/server-destroy/src/lib.rs | 72 --- .../linode/ops/server-provision/Cargo.toml | 25 - svc/pkg/linode/ops/server-provision/README.md | 5 - .../linode/ops/server-provision/src/lib.rs | 266 ---------- svc/pkg/linode/src/lib.rs | 15 + svc/pkg/linode/src/ops/instance_type_get.rs | 53 ++ svc/pkg/linode/src/ops/mod.rs | 1 + svc/pkg/linode/src/types.rs | 38 ++ svc/pkg/linode/{util/src => src/util}/api.rs | 74 +-- .../{util/src/lib.rs => src/util/client.rs} | 15 +- .../linode/{util/src => src/util}/consts.rs | 0 .../src/lib.rs => linode/src/util/mod.rs} | 44 +- svc/pkg/linode/src/workflows/image.rs | 0 svc/pkg/linode/src/workflows/mod.rs | 1 + .../workflows}/prebake_install_complete.rs | 0 .../workflows}/prebake_provision.rs | 0 svc/pkg/linode/src/workflows/server.rs | 493 ++++++++++++++++++ svc/pkg/linode/standalone/gc/Cargo.toml | 4 +- .../instance_type_get.rs} | 0 .../tests/prebake_install_complete.rs | 0 .../{worker => }/tests/prebake_provision.rs | 0 .../server_destroy.rs} | 0 .../server_provision.rs} | 0 svc/pkg/linode/util/Cargo.toml | 15 - svc/pkg/linode/worker/Cargo.toml | 3 +- svc/pkg/linode/worker/src/lib.rs | 1 - svc/pkg/linode/worker/src/workers/mod.rs | 4 - svc/pkg/monolith/standalone/worker/Cargo.toml | 4 +- svc/pkg/region/ops/get/Cargo.toml | 3 +- svc/pkg/region/ops/list-for-game/Cargo.toml | 3 +- 
svc/pkg/region/ops/list/Cargo.toml | 3 +- .../region/ops/resolve-for-game/Cargo.toml | 2 +- svc/pkg/region/ops/resolve/Cargo.toml | 2 +- svc/pkg/tier/ops/list/Cargo.toml | 7 +- 233 files changed, 3336 insertions(+), 3141 deletions(-) create mode 100644 docs/libraries/workflow/DESIGN.md create mode 100644 docs/libraries/workflow/GOTCHAS.md delete mode 100644 docs/libraries/workflow/SIGNALS.md create mode 100644 docs/libraries/workflow/SIGNALS_AND_MESSAGES.md create mode 100644 svc/pkg/cluster/Cargo.toml rename svc/pkg/cluster/{worker => }/Service.toml (78%) rename svc/pkg/cluster/{util => }/build.rs (81%) create mode 100644 svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.down.sql create mode 100644 svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql delete mode 100644 svc/pkg/cluster/ops/datacenter-get/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-get/Service.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-get/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/datacenter-list/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-list/Service.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-list/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-location-get/Service.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-tls-get/Service.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/datacenter-topology-get/README.md delete mode 100644 svc/pkg/cluster/ops/datacenter-topology-get/Service.toml delete mode 100644 svc/pkg/cluster/ops/get-for-game/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/get-for-game/Service.toml delete mode 100644 svc/pkg/cluster/ops/get-for-game/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/get/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/get/Service.toml delete mode 100644 svc/pkg/cluster/ops/get/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/list/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/list/Service.toml delete mode 100644 svc/pkg/cluster/ops/list/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/resolve-for-name-id/Service.toml delete mode 100644 svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml delete mode 100644 svc/pkg/cluster/ops/server-get/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/server-get/Service.toml delete mode 100644 svc/pkg/cluster/ops/server-get/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/server-list/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/server-list/Service.toml delete mode 100644 svc/pkg/cluster/ops/server-list/src/lib.rs delete mode 100644 svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml delete mode 100644 svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml delete mode 100644 svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs create mode 100644 svc/pkg/cluster/src/lib.rs create mode 100644 
svc/pkg/cluster/src/ops/datacenter/get.rs create mode 100644 svc/pkg/cluster/src/ops/datacenter/list.rs create mode 100644 svc/pkg/cluster/src/ops/datacenter/location_get.rs create mode 100644 svc/pkg/cluster/src/ops/datacenter/mod.rs create mode 100644 svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs create mode 100644 svc/pkg/cluster/src/ops/datacenter/tls_get.rs rename svc/pkg/cluster/{ops/datacenter-topology-get/src/lib.rs => src/ops/datacenter/topology_get.rs} (75%) create mode 100644 svc/pkg/cluster/src/ops/get.rs create mode 100644 svc/pkg/cluster/src/ops/get_for_game.rs create mode 100644 svc/pkg/cluster/src/ops/list.rs create mode 100644 svc/pkg/cluster/src/ops/mod.rs create mode 100644 svc/pkg/cluster/src/ops/resolve_for_name_id.rs rename svc/pkg/cluster/{ops/server-destroy-with-filter/src/lib.rs => src/ops/server/destroy_with_filter.rs} (51%) create mode 100644 svc/pkg/cluster/src/ops/server/get.rs create mode 100644 svc/pkg/cluster/src/ops/server/list.rs create mode 100644 svc/pkg/cluster/src/ops/server/mod.rs create mode 100644 svc/pkg/cluster/src/ops/server/resolve_for_ip.rs create mode 100644 svc/pkg/cluster/src/types.rs rename svc/pkg/cluster/{util/src => src/util}/metrics.rs (100%) create mode 100644 svc/pkg/cluster/src/util/mod.rs rename svc/pkg/cluster/{util/src => src/util}/test.rs (100%) create mode 100644 svc/pkg/cluster/src/workflows/cluster.rs create mode 100644 svc/pkg/cluster/src/workflows/datacenter/mod.rs rename svc/pkg/cluster/{worker/src/workers/datacenter_scale.rs => src/workflows/datacenter/scale.rs} (70%) rename svc/pkg/cluster/{worker/src/workers/datacenter_tls_issue.rs => src/workflows/datacenter/tls_issue.rs} (71%) create mode 100644 svc/pkg/cluster/src/workflows/mod.rs rename svc/pkg/cluster/{worker/src/workers/server_destroy.rs => src/workflows/server/destroy.rs} (99%) rename svc/pkg/cluster/{worker/src/workers/server_dns_create.rs => src/workflows/server/dns_create.rs} (100%) rename svc/pkg/cluster/{worker/src/workers/server_dns_delete.rs => src/workflows/server/dns_delete.rs} (100%) rename svc/pkg/cluster/{worker/src/workers/server_drain.rs => src/workflows/server/drain.rs} (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/mod.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/nomad.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/ok_server.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/rivet.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/s3.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/traefik.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/traffic_server.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/components/vector.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/cni_plugins.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/docker.sh (100%) rename 
svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/node_exporter.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/nomad_configure.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/nomad_install.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/ok_server.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/rivet_create_hook.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/rivet_fetch_info.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/rivet_fetch_tls.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/sysctl.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traefik.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traefik_instance.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/cache.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/hosting.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/ip_allow.yaml (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/logging.yaml (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/parent.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/plugin.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/records.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/sni.yaml (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/socks.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/splitdns.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/ssl_multicert.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/strategies.yaml (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/strip_headers.lua (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/trafficserver-release (100%) rename 
svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server/etc/volume.config (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server_configure.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/traffic_server_install.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/vector_configure.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/files/vector_install.sh (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/install_scripts/mod.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install => src/workflows/server/install}/mod.rs (100%) rename svc/pkg/cluster/{worker/src/workers/server_install_complete.rs => src/workflows/server/install_complete.rs} (95%) create mode 100644 svc/pkg/cluster/src/workflows/server/mod.rs rename svc/pkg/cluster/{worker/src/workers => src/workflows/server}/nomad_node_drain_complete.rs (92%) rename svc/pkg/cluster/{worker/src/workers => src/workflows/server}/nomad_node_registered.rs (97%) rename svc/pkg/cluster/{worker/src/workers/server_taint.rs => src/workflows/server/taint.rs} (100%) rename svc/pkg/cluster/{worker/src/workers/server_undrain.rs => src/workflows/server/undrain.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/common.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/create.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/datacenter_create.rs (100%) rename svc/pkg/cluster/{ops/datacenter-get/tests/integration.rs => testsTMP/datacenter_get.rs} (100%) rename svc/pkg/cluster/{ops/datacenter-list/tests/integration.rs => testsTMP/datacenter_list.rs} (100%) rename svc/pkg/cluster/{ops/datacenter-location-get/tests/integration.rs => testsTMP/datacenter_location_get.rs} (100%) rename svc/pkg/cluster/{ops/datacenter-resolve-for-name-id/tests/integration.rs => testsTMP/datacenter_resolve_for_name_id.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/datacenter_scale.rs (100%) rename svc/pkg/cluster/{ops/datacenter-tls-get/tests/integration.rs => testsTMP/datacenter_tls_get.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/datacenter_tls_issue.rs (100%) rename svc/pkg/cluster/{ops/datacenter-topology-get/tests/integration.rs => testsTMP/datacenter_topology_get.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/datacenter_update.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/game_link.rs (100%) rename svc/pkg/cluster/{ops/get/tests/integration.rs => testsTMP/get.rs} (100%) rename svc/pkg/cluster/{ops/get-for-game/tests/integration.rs => testsTMP/get_for_game.rs} (100%) rename svc/pkg/cluster/{ops/list/tests/integration.rs => testsTMP/list.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/nomad_node_drain_complete.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/nomad_node_registered.rs (100%) rename svc/pkg/cluster/{ops/resolve-for-name-id/tests/integration.rs => testsTMP/resolve_for_name_id.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_destroy.rs (100%) rename svc/pkg/cluster/{ops/server-destroy-with-filter/tests/integration.rs => testsTMP/server_destroy_with_filter.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_dns_create.rs 
(100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_dns_delete.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_drain.rs (100%) rename svc/pkg/cluster/{ops/server-get/tests/integration.rs => testsTMP/server_get.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_install.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_install_complete.rs (100%) rename svc/pkg/cluster/{ops/server-list/tests/integration.rs => testsTMP/server_list.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_provision.rs (100%) rename svc/pkg/cluster/{ops/server-resolve-for-ip/tests/integration.rs => testsTMP/server_resolve_for_ip.rs} (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_taint.rs (100%) rename svc/pkg/cluster/{worker/tests => testsTMP}/server_undrain.rs (100%) delete mode 100644 svc/pkg/cluster/util/Cargo.toml delete mode 100644 svc/pkg/cluster/worker/src/lib.rs delete mode 100644 svc/pkg/cluster/worker/src/util.rs delete mode 100644 svc/pkg/cluster/worker/src/workers/create.rs delete mode 100644 svc/pkg/cluster/worker/src/workers/datacenter_create.rs delete mode 100644 svc/pkg/cluster/worker/src/workers/datacenter_update.rs delete mode 100644 svc/pkg/cluster/worker/src/workers/game_link.rs delete mode 100644 svc/pkg/cluster/worker/src/workers/mod.rs delete mode 100644 svc/pkg/cluster/worker/src/workers/server_provision.rs rename svc/pkg/{cluster/ops/datacenter-resolve-for-name-id => linode}/Cargo.toml (51%) rename svc/pkg/linode/{ops/server-provision => }/Service.toml (69%) create mode 100644 svc/pkg/linode/db/linode/Service.toml create mode 100644 svc/pkg/linode/db/linode/migrations/20240705194302_init.down.sql create mode 100644 svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql delete mode 100644 svc/pkg/linode/ops/instance-type-get/Cargo.toml delete mode 100644 svc/pkg/linode/ops/instance-type-get/Service.toml delete mode 100644 svc/pkg/linode/ops/instance-type-get/src/lib.rs delete mode 100644 svc/pkg/linode/ops/server-destroy/Cargo.toml delete mode 100644 svc/pkg/linode/ops/server-destroy/Service.toml delete mode 100644 svc/pkg/linode/ops/server-destroy/src/lib.rs delete mode 100644 svc/pkg/linode/ops/server-provision/Cargo.toml delete mode 100644 svc/pkg/linode/ops/server-provision/README.md delete mode 100644 svc/pkg/linode/ops/server-provision/src/lib.rs create mode 100644 svc/pkg/linode/src/lib.rs create mode 100644 svc/pkg/linode/src/ops/instance_type_get.rs create mode 100644 svc/pkg/linode/src/ops/mod.rs create mode 100644 svc/pkg/linode/src/types.rs rename svc/pkg/linode/{util/src => src/util}/api.rs (89%) rename svc/pkg/linode/{util/src/lib.rs => src/util/client.rs} (93%) rename svc/pkg/linode/{util/src => src/util}/consts.rs (100%) rename svc/pkg/{cluster/util/src/lib.rs => linode/src/util/mod.rs} (53%) create mode 100644 svc/pkg/linode/src/workflows/image.rs create mode 100644 svc/pkg/linode/src/workflows/mod.rs rename svc/pkg/linode/{worker/src/workers => src/workflows}/prebake_install_complete.rs (100%) rename svc/pkg/linode/{worker/src/workers => src/workflows}/prebake_provision.rs (100%) create mode 100644 svc/pkg/linode/src/workflows/server.rs rename svc/pkg/linode/{ops/instance-type-get/tests/integration.rs => tests/instance_type_get.rs} (100%) rename svc/pkg/linode/{worker => }/tests/prebake_install_complete.rs (100%) rename svc/pkg/linode/{worker => }/tests/prebake_provision.rs (100%) rename svc/pkg/linode/{ops/server-destroy/tests/integration.rs => tests/server_destroy.rs} 
(100%)
rename svc/pkg/linode/{ops/server-provision/tests/integration.rs => tests/server_provision.rs} (100%)
delete mode 100644 svc/pkg/linode/util/Cargo.toml
delete mode 100644 svc/pkg/linode/worker/src/lib.rs
delete mode 100644 svc/pkg/linode/worker/src/workers/mod.rs

diff --git a/docs/libraries/workflow/DESIGN.md b/docs/libraries/workflow/DESIGN.md
new file mode 100644
index 000000000..cf4c86881
--- /dev/null
+++ b/docs/libraries/workflow/DESIGN.md
@@ -0,0 +1,5 @@
+# Design
+
+## Hierarchy
+
+TODO
diff --git a/docs/libraries/workflow/GLOSSARY.md b/docs/libraries/workflow/GLOSSARY.md
index c03544218..5d79cf6eb 100644
--- a/docs/libraries/workflow/GLOSSARY.md
+++ b/docs/libraries/workflow/GLOSSARY.md
@@ -15,8 +15,9 @@ A collection of registered workflows. This is solely used for the worker to fetc
 A series of fallible executions of code (also known as activities), signal listeners, signal transmitters,
 or sub workflow triggers.
 
-Workflows can be though of as a list of tasks. The code defining a workflow only specifies what items should
-be ran; There is no complex logic (e.g. database queries) running within the top level of the workflow.
+Workflows can be thought of as an outline or a list of tasks. The code defining a workflow only specifies
+what items should be run; there is no complex logic (e.g. database queries) running within the top level of
+the workflow.
 
 Upon an activity failure, workflow code can be reran without duplicate side effects because activities are
 cached and re-read after they succeed.
@@ -27,6 +28,11 @@ A block of code that can fail. This cannot trigger other workflows or activities
 Activities are retried by workflows when they fail or replayed when they succeed but a later part of the
 workflow fails.
 
+When choosing between a workflow and an activity:
+
+- Choose a workflow when there are multiple steps that need to be retried individually upon failure.
+- Choose an activity when there is only one chunk of retryable code that needs to be executed.
+
 ## Operation
 
 Effectively a native rust function. Can fail or not fail. Used for widely used operations like fetching a
@@ -51,6 +57,10 @@ this signal for it to be picked up, otherwise it will stay in the database indef
 workflow. Signals do not have a response; another signal must be sent back from the workflow and listened
 to by the sender.
 
+### Differences from messages
+
+Signals are effectively just messages that can only be consumed by workflows.
+
 ## Tagged Signal
 
 Same as a signal except it is sent with a JSON blob as its "tags" instead of to a specific workflow. Any
@@ -65,6 +75,28 @@ See [the signals document](./SIGNALS.md).
 A "one of" for signal listening. Allows for listening to multiple signals at once and receiving the first
 one that gets sent.
 
+## Message
+
+A payload that can be sent out of a workflow. Includes a JSON blob for tags which can be subscribed to with
+a subscription.
+
+### Differences from signals
+
+Messages are effectively just signals that can only be consumed by non-workflow code.
+
+## Subscription
+
+An entity that waits for messages with the same (not a superset/subset) tags as itself. Upon receiving a
+message, the message will be returned and the developer can choose to continue listening for more messages.
+
+## Tail
+
+Reads the last message without waiting. If none exists (all previous messages expired), `None` is returned.
+
+## Tail w/ Anchor
+
+Reads the earliest message after the given anchor timestamp, or waits for one to be published if none exist.
+
 ## Workflow Event
 
 An action that gets executed in a workflow. An event can be a:
diff --git a/docs/libraries/workflow/GOTCHAS.md b/docs/libraries/workflow/GOTCHAS.md
new file mode 100644
index 000000000..6bada3223
--- /dev/null
+++ b/docs/libraries/workflow/GOTCHAS.md
@@ -0,0 +1,15 @@
+# Gotchas
+
+## Timestamps
+
+Use timestamps with care when passing them between activity inputs/outputs. Because activity inputs need
+to be consistent for replays, use `util::timestamp::now()` only within activities, never in workflow bodies.
+
+If you need a timestamp in a workflow body, use `ctx.create_ts()` for the creation time of the workflow.
+Using `ctx.ts()` is also inconsistent because it marks the current workflow run, which differs between replays.
+
+If you need a consistent current timestamp, create a new activity that just returns `util::timestamp::now()`.
+This will be the current timestamp on the first execution of the activity and won't change on replay.
+
+> **When an activity's input doesn't produce the same hash as the first time it was executed (i.e. its input
+> changed), the entire workflow will error with "History Diverged" and will not restart.**
diff --git a/docs/libraries/workflow/SIGNALS.md b/docs/libraries/workflow/SIGNALS.md
deleted file mode 100644
index cc71587d3..000000000
--- a/docs/libraries/workflow/SIGNALS.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Signals
-
-## Tagged signals
-
-Tagged signals are consumed on a first-come-first-serve basis because a single signal being consumed by more
-than one workflow is not a supported design pattern. To work around this, consume the signal by a workflow
-then publish multiple signals from that workflow.
diff --git a/docs/libraries/workflow/SIGNALS_AND_MESSAGES.md b/docs/libraries/workflow/SIGNALS_AND_MESSAGES.md
new file mode 100644
index 000000000..d9127454f
--- /dev/null
+++ b/docs/libraries/workflow/SIGNALS_AND_MESSAGES.md
@@ -0,0 +1,29 @@
+# Signals
+
+## Tagged signals
+
+Tagged signals are consumed on a first-come, first-served basis because a single signal being consumed by
+more than one workflow is not a supported design pattern. To work around this, have a single workflow
+consume the signal, then publish multiple signals from that workflow.
+
+# Choosing Between Signals and Messages
+
+> **Note**: the non-workflow ecosystem consists of the API layer, standalone services, operations, and old workers.
+
+## Signal
+
+- Sending data from the non-workflow ecosystem to the workflow ecosystem
+- Sending data from the workflow ecosystem to somewhere else in the workflow ecosystem
+
+## Message
+
+- Sending data from the workflow ecosystem to the non-workflow ecosystem
+
+## Both Signals and Messages
+
+Sometimes you may need to listen for a particular event in both the workflow and non-workflow ecosystems.
+In this case you can publish both a signal and a message (you can derive `signal` and `message` on the same
+struct to make this easier).
+
+Both messages and signals are meant to be payloads with a specific recipient. They are not meant to be
+published without an intended target (i.e. consumable by any listener).
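As a rough sketch of the dual signal/message pattern described in the doc above (and tied to the `signal`/`message` macro changes later in this patch): the struct names, signal/message names, and fields below are hypothetical, and any additional message configuration (e.g. a tail TTL attribute) is omitted.

```rust
use chirp_workflow::prelude::*;
use uuid::Uuid;

// Hypothetical signal-only payload, consumed by a workflow.
#[signal("example-server-drain")]
pub struct Drain {
    pub server_id: Uuid,
}

// Hypothetical payload published as both a signal and a message; each macro
// detects the other attribute so the serde derives are only emitted once.
#[signal("example-server-destroy")]
#[message("example-server-destroy")]
pub struct Destroy {
    pub server_id: Uuid,
}

// "One of" listener over both signals; the trailing comma is accepted after
// the `$(,)?` change to `join_signal!` in this patch.
join_signal!(pub ServerEvent, [Drain, Destroy,]);
```
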
diff --git a/lib/bolt/core/src/tasks/gen.rs b/lib/bolt/core/src/tasks/gen.rs index 9511dba8d..eac2916df 100644 --- a/lib/bolt/core/src/tasks/gen.rs +++ b/lib/bolt/core/src/tasks/gen.rs @@ -162,12 +162,6 @@ async fn generate_root(path: &Path) { } } } - - // Utils lib - let util_path = pkg.path().join("util"); - if fs::metadata(&util_path).await.is_ok() { - set_license(&util_path.join("Cargo.toml")).await; - } } } diff --git a/lib/chirp-workflow/core/src/ctx/activity.rs b/lib/chirp-workflow/core/src/ctx/activity.rs index 858f85414..d04f37565 100644 --- a/lib/chirp-workflow/core/src/ctx/activity.rs +++ b/lib/chirp-workflow/core/src/ctx/activity.rs @@ -122,6 +122,10 @@ impl ActivityCtx { self.name } + pub fn workflow_id(&self) -> Uuid { + self.workflow_id + } + pub fn req_id(&self) -> Uuid { self.op_ctx.req_id() } diff --git a/lib/chirp-workflow/core/src/ctx/operation.rs b/lib/chirp-workflow/core/src/ctx/operation.rs index 4094d77a2..ae960e082 100644 --- a/lib/chirp-workflow/core/src/ctx/operation.rs +++ b/lib/chirp-workflow/core/src/ctx/operation.rs @@ -4,6 +4,7 @@ use uuid::Uuid; use crate::{DatabaseHandle, Operation, OperationInput, WorkflowError}; +#[derive(Clone)] pub struct OperationCtx { ray_id: Uuid, name: &'static str, diff --git a/lib/chirp-workflow/core/src/ctx/workflow.rs b/lib/chirp-workflow/core/src/ctx/workflow.rs index a6dbd78f8..0b750b625 100644 --- a/lib/chirp-workflow/core/src/ctx/workflow.rs +++ b/lib/chirp-workflow/core/src/ctx/workflow.rs @@ -9,7 +9,8 @@ use crate::{ activity::ActivityId, event::Event, util::{self, Location}, - Activity, ActivityCtx, ActivityInput, DatabaseHandle, Executable, Listen, PulledWorkflow, + executable::{closure, Executable, AsyncResult}, + Activity, ActivityCtx, ActivityInput, DatabaseHandle, Listen, PulledWorkflow, RegistryHandle, Signal, SignalRow, Workflow, WorkflowError, WorkflowInput, WorkflowResult, }; @@ -28,12 +29,13 @@ const DB_ACTION_RETRY: Duration = Duration::from_millis(150); // Most db action retries const MAX_DB_ACTION_RETRIES: usize = 5; -// TODO: Use generics to store input instead of a string +// TODO: Use generics to store input instead of a json value +// NOTE: Clonable because of inner arcs #[derive(Clone)] pub struct WorkflowCtx { - pub workflow_id: Uuid, + workflow_id: Uuid, /// Name of the workflow to run in the registry. - pub name: String, + name: String, create_ts: i64, ts: i64, ray_id: Uuid, @@ -663,6 +665,17 @@ impl WorkflowCtx { exec.execute(self).await } + /// Spawns a new thread to execute workflow steps in. + pub fn spawn(&mut self, f: F) -> tokio::task::JoinHandle> + where + F: for<'a> FnOnce(&'a mut WorkflowCtx) -> AsyncResult<'a, T> + Send + 'static + { + let mut ctx = self.clone(); + tokio::task::spawn(async move { + closure(f).execute(&mut ctx).await + }) + } + /// Sends a signal. pub async fn signal( &mut self, @@ -789,6 +802,14 @@ impl WorkflowCtx { } impl WorkflowCtx { + pub fn name(&self) -> &str { + &self.name + } + + pub fn workflow_id(&self) -> Uuid { + self.workflow_id + } + /// Timestamp at which this workflow run started. 
pub fn ts(&self) -> i64 { self.ts diff --git a/lib/chirp-workflow/core/src/executable.rs b/lib/chirp-workflow/core/src/executable.rs index e56e68785..dde5e56a1 100644 --- a/lib/chirp-workflow/core/src/executable.rs +++ b/lib/chirp-workflow/core/src/executable.rs @@ -14,9 +14,9 @@ pub trait Executable: Send { async fn execute(self, ctx: &mut WorkflowCtx) -> GlobalResult; } -type AsyncResult<'a, T> = Pin> + Send + 'a>>; +pub type AsyncResult<'a, T> = Pin> + Send + 'a>>; -// Closure executuable impl +// Closure executable impl #[async_trait] impl Executable for F where @@ -76,7 +76,7 @@ struct TupleHelper { // Must wrap all closured being used as executables in this function due to // https://github.com/rust-lang/rust/issues/70263 -pub fn closure(f: F) -> F +pub fn closure(f: F) -> F where F: for<'a> FnOnce(&'a mut WorkflowCtx) -> AsyncResult<'a, T> + Send, { diff --git a/lib/chirp-workflow/core/src/signal.rs b/lib/chirp-workflow/core/src/signal.rs index d95dec106..43c088a34 100644 --- a/lib/chirp-workflow/core/src/signal.rs +++ b/lib/chirp-workflow/core/src/signal.rs @@ -44,14 +44,14 @@ pub trait Listen: Sized { /// ```` #[macro_export] macro_rules! join_signal { - (pub $join:ident, [$($signals:ident),*]) => { + (pub $join:ident, [$($signals:ident),* $(,)?]) => { pub enum $join { $($signals($signals)),* } join_signal!(@ $join, [$($signals),*]); }; - ($join:ident, [$($signals:ident),*]) => { + ($join:ident, [$($signals:ident),* $(,)?]) => { enum $join { $($signals($signals)),* } diff --git a/lib/chirp-workflow/macros/src/lib.rs b/lib/chirp-workflow/macros/src/lib.rs index 51500abe5..890657c4d 100644 --- a/lib/chirp-workflow/macros/src/lib.rs +++ b/lib/chirp-workflow/macros/src/lib.rs @@ -284,8 +284,16 @@ pub fn signal(attr: TokenStream, item: TokenStream) -> TokenStream { let struct_ident = &item_struct.ident; + // If also a message, don't derive serde traits + let also_message = item_struct.attrs.iter().filter_map(|attr| attr.path().segments.last()).any(|seg| seg.ident == "message"); + let serde_derive = if also_message { + quote! {} + } else { + quote!{ #[derive(serde::Serialize, serde::Deserialize)] } + }; + let expanded = quote! { - #[derive(serde::Serialize, serde::Deserialize)] + #serde_derive #item_struct impl Signal for #struct_ident { @@ -293,9 +301,9 @@ pub fn signal(attr: TokenStream, item: TokenStream) -> TokenStream { } #[async_trait::async_trait] - impl Listen for #struct_ident { + impl Listen for #struct_ident { async fn listen(ctx: &mut chirp_workflow::prelude::WorkflowCtx) -> chirp_workflow::prelude::WorkflowResult { - let row = ctx.listen_any(&[Self::NAME]).await?; + let row = ctx.listen_any(&[::NAME]).await?; Self::parse(&row.signal_name, row.body) } @@ -313,6 +321,14 @@ pub fn message(attr: TokenStream, item: TokenStream) -> TokenStream { let name = parse_macro_input!(attr as LitStr); let item_struct = parse_macro_input!(item as ItemStruct); + // If also a signal, don't derive serde traits + let also_signal = item_struct.attrs.iter().filter_map(|attr| attr.path().segments.last()).any(|seg| seg.ident == "signal"); + let serde_derive = if also_signal { + quote! {} + } else { + quote!{ #[derive(serde::Serialize, serde::Deserialize)] } + }; + let config = match parse_msg_config(&item_struct.attrs) { Ok(x) => x, Err(err) => return err.into_compile_error().into(), @@ -322,25 +338,14 @@ pub fn message(attr: TokenStream, item: TokenStream) -> TokenStream { let tail_ttl = config.tail_ttl; let expanded = quote! 
{ - #[derive(Debug, serde::Serialize, serde::Deserialize)] + #serde_derive + #[derive(Debug)] #item_struct impl Message for #struct_ident { const NAME: &'static str = #name; const TAIL_TTL: std::time::Duration = std::time::Duration::from_secs(#tail_ttl); } - - #[async_trait::async_trait] - impl Listen for #struct_ident { - async fn listen(ctx: &mut chirp_workflow::prelude::WorkflowCtx) -> chirp_workflow::prelude::WorkflowResult { - let row = ctx.listen_any(&[Self::NAME]).await?; - Self::parse(&row.signal_name, row.body) - } - - fn parse(_name: &str, body: serde_json::Value) -> chirp_workflow::prelude::WorkflowResult { - serde_json::from_value(body).map_err(WorkflowError::DeserializeActivityOutput) - } - } }; TokenStream::from(expanded) diff --git a/proto/backend/cluster.proto b/proto/backend/cluster.proto index 8c901f555..01b73df6d 100644 --- a/proto/backend/cluster.proto +++ b/proto/backend/cluster.proto @@ -94,3 +94,8 @@ message ServerFilter { bool filter_public_ips = 9; repeated string public_ips = 10; } + +// Helper proto for writing to sql +message Pools { + repeated rivet.backend.cluster.Pool pools = 1; +} diff --git a/svc/Cargo.lock b/svc/Cargo.lock index 275cac882..3bbf9d01d 100644 --- a/svc/Cargo.lock +++ b/svc/Cargo.lock @@ -103,14 +103,7 @@ dependencies = [ "chirp-client", "chirp-workflow", "chrono", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-datacenter-resolve-for-name-id", - "cluster-get", - "cluster-list", - "cluster-server-destroy-with-filter", - "cluster-server-get", - "cluster-server-list", + "cluster", "http 0.2.12", "hyper", "lazy_static", @@ -125,7 +118,6 @@ dependencies = [ "rivet-matchmaker", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "rivet-util-mm", "s3-util", "serde", @@ -253,7 +245,7 @@ dependencies = [ "cloud-namespace-token-public-create", "cloud-version-get", "cloud-version-publish", - "cluster-datacenter-list", + "cluster", "custom-user-avatar-list-for-game", "custom-user-avatar-upload-complete", "faker-region", @@ -307,7 +299,6 @@ dependencies = [ "rivet-health-checks", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "rivet-util-job", "rivet-util-mm", "rivet-util-nsfw", @@ -777,10 +768,7 @@ dependencies = [ "async-trait", "chirp-client", "chrono", - "cluster-datacenter-get", - "cluster-datacenter-tls-get", - "cluster-server-get", - "cluster-server-resolve-for-ip", + "cluster", "http 0.2.12", "hyper", "lazy_static", @@ -791,7 +779,6 @@ dependencies = [ "rivet-health-checks", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "serde", "serde_json", "thiserror", @@ -859,7 +846,7 @@ dependencies = [ "cdn-namespace-domain-create", "chirp-client", "chrono", - "cluster-server-list", + "cluster", "faker-cdn-site", "faker-game", "faker-game-namespace", @@ -2400,58 +2387,30 @@ dependencies = [ ] [[package]] -name = "cluster-datacenter-get" +name = "cluster" version = "0.0.1" dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-datacenter-list" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-datacenter-location-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", + "acme-lib", + "anyhow", + "chirp-workflow", + "cloudflare", + "hex", + "http 0.2.12", "ip-info", + "lazy_static", + "linode", + "merkle_hash", + "nomad-util", + "nomad_client", + "rand", + "rivet-metrics", "rivet-operation", + 
"rivet-runtime", + "serde", "sqlx", -] - -[[package]] -name = "cluster-datacenter-resolve-for-name-id" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-datacenter-tls-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", + "thiserror", + "tokio", + "trust-dns-resolver", ] [[package]] @@ -2460,7 +2419,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "rivet-connection", "rivet-health-checks", "rivet-metrics", @@ -2472,35 +2431,18 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "cluster-datacenter-topology-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "lazy_static", - "nomad-util", - "nomad_client", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - [[package]] name = "cluster-default-update" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-get", + "cluster", "prost 0.10.4", "reqwest", "rivet-connection", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "serde", "serde_json", "tokio", @@ -2519,16 +2461,12 @@ dependencies = [ "chirp-worker", "chrono", "cloudflare", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-datacenter-topology-get", + "cluster", "http 0.2.12", "include_dir", "indoc 1.0.9", "lazy_static", - "linode-instance-type-get", - "linode-server-destroy", - "linode-server-provision", + "linode", "maplit", "nomad-util", "openssl", @@ -2538,7 +2476,6 @@ dependencies = [ "rivet-metrics", "rivet-operation", "rivet-runtime", - "rivet-util-cluster", "s3-util", "serde_yaml", "ssh2", @@ -2556,165 +2493,36 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "rivet-connection", "rivet-health-checks", "rivet-metrics", "rivet-operation", "rivet-runtime", - "rivet-util-cluster", "sqlx", "tokio", "tracing", "tracing-subscriber", ] -[[package]] -name = "cluster-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-get-for-game" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "rivet-util-cluster", - "sqlx", -] - -[[package]] -name = "cluster-list" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - [[package]] name = "cluster-metrics-publish" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "rivet-connection", "rivet-health-checks", "rivet-metrics", "rivet-operation", "rivet-runtime", - "rivet-util-cluster", "sqlx", "tokio", "tracing", "tracing-subscriber", ] -[[package]] -name = "cluster-resolve-for-name-id" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-destroy-with-filter" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "cluster-server-list", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-list" 
-version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-resolve-for-ip" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-worker" -version = "0.0.1" -dependencies = [ - "acme-lib", - "anyhow", - "chirp-client", - "chirp-worker", - "chrono", - "cloudflare", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-datacenter-topology-get", - "http 0.2.12", - "include_dir", - "indoc 1.0.9", - "lazy_static", - "linode-instance-type-get", - "linode-server-destroy", - "linode-server-provision", - "maplit", - "nomad-util", - "nomad_client", - "openssl", - "rivet-convert", - "rivet-health-checks", - "rivet-metrics", - "rivet-runtime", - "rivet-util-cluster", - "s3-util", - "serde_yaml", - "sqlx", - "ssh2", - "thiserror", - "token-create", - "trust-dns-resolver", -] - [[package]] name = "combine" version = "4.6.6" @@ -5102,84 +4910,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] -name = "linode-gc" +name = "linode" version = "0.0.1" dependencies = [ - "chirp-client", - "chirp-worker", + "chirp-workflow", "chrono", + "cluster", + "rand", "reqwest", - "rivet-connection", - "rivet-health-checks", - "rivet-metrics", - "rivet-operation", - "rivet-runtime", - "rivet-util-cluster", - "rivet-util-linode", "serde", "serde_json", "sqlx", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "linode-instance-type-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "rivet-operation", - "rivet-util-cluster", - "rivet-util-linode", - "sqlx", -] - -[[package]] -name = "linode-server-destroy" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "cluster-datacenter-get", - "linode-server-provision", - "reqwest", - "rivet-operation", - "rivet-util-cluster", - "rivet-util-linode", - "sqlx", + "ssh-key", ] [[package]] -name = "linode-server-provision" +name = "linode-gc" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "linode-server-destroy", + "chrono", + "linode", "reqwest", - "rivet-operation", - "rivet-util-cluster", - "rivet-util-linode", - "sqlx", -] - -[[package]] -name = "linode-worker" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "cluster-datacenter-get", - "rivet-convert", + "rivet-connection", "rivet-health-checks", "rivet-metrics", + "rivet-operation", "rivet-runtime", - "rivet-util-cluster", - "rivet-util-linode", + "serde", + "serde_json", "sqlx", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -5915,13 +5679,13 @@ dependencies = [ "cf-custom-hostname-worker", "chirp-client", "cloud-worker", - "cluster-worker", + "cluster", "external-worker", "game-user-worker", "job-log-worker", "job-run-worker", "kv-worker", - "linode-worker", + "linode", "mm-worker", "rivet-connection", "rivet-health-checks", @@ -6938,8 +6702,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "cluster-datacenter-location-get", + "cluster", "faker-region", "prost 0.10.4", "rivet-operation", @@ -6952,11 +6715,10 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-list", + "cluster", "faker-region", "prost 0.10.4", "rivet-operation", - 
"rivet-util-cluster", "sqlx", ] @@ -6966,8 +6728,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-list", - "cluster-get-for-game", + "cluster", "faker-region", "prost 0.10.4", "rivet-operation", @@ -6995,7 +6756,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "faker-region", "prost 0.10.4", "region-get", @@ -7010,7 +6771,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "faker-region", "prost 0.10.4", "region-get", @@ -7567,20 +7328,6 @@ dependencies = [ name = "rivet-util-cdn" version = "0.1.0" -[[package]] -name = "rivet-util-cluster" -version = "0.1.0" -dependencies = [ - "hex", - "lazy_static", - "merkle_hash", - "rivet-metrics", - "rivet-util", - "tokio", - "types", - "uuid", -] - [[package]] name = "rivet-util-env" version = "0.1.0" @@ -7612,19 +7359,6 @@ dependencies = [ name = "rivet-util-kv" version = "0.1.0" -[[package]] -name = "rivet-util-linode" -version = "0.1.0" -dependencies = [ - "chrono", - "rand", - "reqwest", - "rivet-operation", - "serde", - "serde_json", - "ssh-key", -] - [[package]] name = "rivet-util-macros" version = "0.1.0" @@ -8934,12 +8668,10 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "cluster-datacenter-list", - "linode-instance-type-get", + "cluster", + "linode", "prost 0.10.4", "rivet-operation", - "rivet-util-cluster", ] [[package]] diff --git a/svc/Cargo.toml b/svc/Cargo.toml index 44e05006b..0ff8959e1 100644 --- a/svc/Cargo.toml +++ b/svc/Cargo.toml @@ -59,26 +59,12 @@ members = [ "pkg/cloud/ops/version-get", "pkg/cloud/ops/version-publish", "pkg/cloud/worker", - "pkg/cluster/ops/datacenter-get", - "pkg/cluster/ops/datacenter-list", - "pkg/cluster/ops/datacenter-location-get", - "pkg/cluster/ops/datacenter-resolve-for-name-id", - "pkg/cluster/ops/datacenter-tls-get", - "pkg/cluster/ops/datacenter-topology-get", - "pkg/cluster/ops/get", - "pkg/cluster/ops/get-for-game", - "pkg/cluster/ops/list", - "pkg/cluster/ops/resolve-for-name-id", - "pkg/cluster/ops/server-destroy-with-filter", - "pkg/cluster/ops/server-get", - "pkg/cluster/ops/server-list", - "pkg/cluster/ops/server-resolve-for-ip", + "pkg/cluster", "pkg/cluster/standalone/datacenter-tls-renew", "pkg/cluster/standalone/default-update", "pkg/cluster/standalone/fix-tls", "pkg/cluster/standalone/gc", "pkg/cluster/standalone/metrics-publish", - "pkg/cluster/worker", "pkg/custom-user-avatar/ops/list-for-game", "pkg/custom-user-avatar/ops/upload-complete", "pkg/debug/ops/email-res", @@ -151,11 +137,8 @@ members = [ "pkg/kv/ops/get", "pkg/kv/ops/list", "pkg/kv/worker", - "pkg/linode/ops/instance-type-get", - "pkg/linode/ops/server-destroy", - "pkg/linode/ops/server-provision", + "pkg/linode", "pkg/linode/standalone/gc", - "pkg/linode/worker", "pkg/load-test/standalone/api-cloud", "pkg/load-test/standalone/mm", "pkg/load-test/standalone/mm-sustain", diff --git a/svc/api/admin/Cargo.toml b/svc/api/admin/Cargo.toml index ced0d77b6..697fa1c95 100644 --- a/svc/api/admin/Cargo.toml +++ b/svc/api/admin/Cargo.toml @@ -39,17 +39,9 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ ] } url = "2.2.2" uuid = { version = "1", features = ["v4"] } -util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } util-mm = { package = "rivet-util-mm", path = "../../pkg/mm/util" } -cluster-get = { path = "../../pkg/cluster/ops/get" } -cluster-list = { 
path = "../../pkg/cluster/ops/list" } -cluster-server-get = { path = "../../pkg/cluster/ops/server-get" } -cluster-server-destroy-with-filter = { path = "../../pkg/cluster/ops/server-destroy-with-filter" } -cluster-server-list = { path = "../../pkg/cluster/ops/server-list" } -cluster-datacenter-list = { path = "../../pkg/cluster/ops/datacenter-list" } -cluster-datacenter-get = { path = "../../pkg/cluster/ops/datacenter-get" } -cluster-datacenter-resolve-for-name-id = { path = "../../pkg/cluster/ops/datacenter-resolve-for-name-id" } +cluster = { path = "../../pkg/cluster" } token-create = { path = "../../pkg/token/ops/create" } [dev-dependencies] diff --git a/svc/api/cloud/Cargo.toml b/svc/api/cloud/Cargo.toml index 5aad793ab..b7f8dc162 100644 --- a/svc/api/cloud/Cargo.toml +++ b/svc/api/cloud/Cargo.toml @@ -34,7 +34,6 @@ tokio = { version = "1.29" } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } url = "2.2.2" -util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } util-job = { package = "rivet-util-job", path = "../../pkg/job/util" } util-mm = { package = "rivet-util-mm", path = "../../pkg/mm/util" } util-nsfw = { package = "rivet-util-nsfw", path = "../../pkg/nsfw/util" } @@ -64,7 +63,7 @@ cloud-namespace-token-development-create = { path = "../../pkg/cloud/ops/namespa cloud-namespace-token-public-create = { path = "../../pkg/cloud/ops/namespace-token-public-create" } cloud-version-get = { path = "../../pkg/cloud/ops/version-get" } cloud-version-publish = { path = "../../pkg/cloud/ops/version-publish" } -cluster-datacenter-list = { path = "../../pkg/cluster/ops/datacenter-list" } +cluster = { path = "../../pkg/cluster" } custom-user-avatar-list-for-game = { path = "../../pkg/custom-user-avatar/ops/list-for-game" } custom-user-avatar-upload-complete = { path = "../../pkg/custom-user-avatar/ops/upload-complete" } game-banner-upload-complete = { path = "../../pkg/game/ops/banner-upload-complete" } diff --git a/svc/api/provision/Cargo.toml b/svc/api/provision/Cargo.toml index c5a862320..56e4bdb2b 100644 --- a/svc/api/provision/Cargo.toml +++ b/svc/api/provision/Cargo.toml @@ -28,10 +28,6 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } url = "2.2.2" uuid = { version = "1", features = ["v4"] } -util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } -cluster-datacenter-get = { path = "../../pkg/cluster/ops/datacenter-get" } -cluster-datacenter-tls-get = { path = "../../pkg/cluster/ops/datacenter-tls-get" } -cluster-server-get = { path = "../../pkg/cluster/ops/server-get" } -cluster-server-resolve-for-ip = { path = "../../pkg/cluster/ops/server-resolve-for-ip" } +cluster = { path = "../../pkg/cluster" } diff --git a/svc/api/traefik-provider/Cargo.toml b/svc/api/traefik-provider/Cargo.toml index 69708ac00..391695a0c 100644 --- a/svc/api/traefik-provider/Cargo.toml +++ b/svc/api/traefik-provider/Cargo.toml @@ -37,7 +37,7 @@ util-cdn = { package = "rivet-util-cdn", path = "../../pkg/cdn/util" } util-job = { package = "rivet-util-job", path = "../../pkg/job/util" } uuid = { version = "1", features = ["v4"] } -cluster-server-list = { path = "../../pkg/cluster/ops/server-list" } +cluster = { path = "../../pkg/cluster" } [dev-dependencies] rivet-connection = { path = "../../../lib/connection" } diff --git a/svc/pkg/cluster/Cargo.toml b/svc/pkg/cluster/Cargo.toml new file mode 100644 index 
000000000..840a27587 --- /dev/null +++ b/svc/pkg/cluster/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "cluster" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +acme-lib = "0.9" +anyhow = "1.0" +chirp-workflow = { path = "../../../lib/chirp-workflow/core" } +cloudflare = "0.10.1" +http = "0.2" +lazy_static = "1.4" +nomad-util = { path = "../../../lib/nomad-util" } +rand = "0.8" +rivet-metrics = { path = "../../../lib/metrics" } +rivet-operation = { path = "../../../lib/operation/core" } +rivet-runtime = { path = "../../../lib/runtime" } +serde = { version = "1.0.198", features = ["derive"] } +thiserror = "1.0" +trust-dns-resolver = { version = "0.23.2", features = ["dns-over-native-tls"] } + +ip-info = { path = "../ip/ops/info" } +linode = { path = "../linode" } + +[dependencies.nomad_client] +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + +[dependencies.sqlx] +git = "https://github.com/rivet-gg/sqlx" +rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" +default-features = false +features = [ "json", "ipnetwork" ] + +[build-dependencies] +merkle_hash = "3.6" +hex = "0.4" +tokio = { version = "1.29", features = ["full"] } diff --git a/svc/pkg/cluster/worker/Service.toml b/svc/pkg/cluster/Service.toml similarity index 78% rename from svc/pkg/cluster/worker/Service.toml rename to svc/pkg/cluster/Service.toml index d4fb93058..c4e73d04c 100644 --- a/svc/pkg/cluster/worker/Service.toml +++ b/svc/pkg/cluster/Service.toml @@ -1,10 +1,10 @@ [service] -name = "cluster-worker" +name = "cluster" [runtime] kind = "rust" -[consumer] +[package] [secrets] "rivet/api_traefik_provider/token" = {} @@ -12,4 +12,4 @@ kind = "rust" "ssh/server/private_key_openssh" = {} [databases] -bucket-build = {} +db-cluster = {} diff --git a/svc/pkg/cluster/util/build.rs b/svc/pkg/cluster/build.rs similarity index 81% rename from svc/pkg/cluster/util/build.rs rename to svc/pkg/cluster/build.rs index eed68bebb..d454c7c22 100644 --- a/svc/pkg/cluster/util/build.rs +++ b/svc/pkg/cluster/build.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; -use tokio::fs; use merkle_hash::MerkleTree; +use tokio::fs; // NOTE: This only gets the hash of the folder. Any template variables changed in the install scripts // will not update the hash. 
@@ -10,15 +10,11 @@ use merkle_hash::MerkleTree; async fn main() { let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap()); let current_dir = std::env::current_dir().unwrap(); - let server_install_path = { - let mut dir = current_dir.clone(); - dir.pop(); - - dir.join("worker") - .join("src") - .join("workers") - .join("server_install") - }; + let server_install_path = current_dir + .join("src") + .join("workflows") + .join("server") + .join("install"); // Add rereun statement println!("cargo:rerun-if-changed={}", server_install_path.display()); diff --git a/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.down.sql b/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql b/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql new file mode 100644 index 000000000..5c8486dec --- /dev/null +++ b/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql @@ -0,0 +1,13 @@ +ALTER TABLE datacenters + ADD COLUMN pools2 JSONB, -- Vec + ADD COLUMN provider2 JSONB, -- cluster::types::Provider + ADD COLUMN build_delivery_method2 JSONB; -- cluster::types::BuildDeliveryMethod + +ALTER TABLE servers + ADD COLUMN pool_type2 JSONB; -- cluster::types::PoolType + +-- Moved to db-linode +DROP TABLE server_images_linode; + +ALTER TABLE datacenter_tls + ADD COLUMN state2 JSONB; -- cluster::types::TlsState diff --git a/svc/pkg/cluster/ops/datacenter-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-get/Cargo.toml deleted file mode 100644 index 7e1073ba2..000000000 --- a/svc/pkg/cluster/ops/datacenter-get/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-datacenter-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-get/Service.toml b/svc/pkg/cluster/ops/datacenter-get/Service.toml deleted file mode 100644 index a0f9d3cb5..000000000 --- a/svc/pkg/cluster/ops/datacenter-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-get/src/lib.rs deleted file mode 100644 index d0cd01826..000000000 --- a/svc/pkg/cluster/ops/datacenter-get/src/lib.rs +++ /dev/null @@ -1,107 +0,0 @@ -use std::convert::{TryFrom, TryInto}; - -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Datacenter { - datacenter_id: Uuid, - cluster_id: Uuid, - name_id: String, - display_name: String, - provider: i64, - provider_datacenter_id: String, - provider_api_token: Option, - pools: Vec, - build_delivery_method: i64, - prebakes_enabled: bool, - create_ts: i64, -} - -impl TryFrom for backend::cluster::Datacenter { - type Error = GlobalError; - - fn try_from(value: Datacenter) -> GlobalResult { - let pools = cluster::msg::datacenter_create::Pools::decode(value.pools.as_slice())?.pools; - - Ok(backend::cluster::Datacenter { 
- datacenter_id: Some(value.datacenter_id.into()), - cluster_id: Some(value.cluster_id.into()), - name_id: value.name_id, - display_name: value.display_name, - create_ts: value.create_ts, - provider: value.provider as i32, - provider_datacenter_id: value.provider_datacenter_id, - provider_api_token: value.provider_api_token, - pools, - build_delivery_method: value.build_delivery_method as i32, - prebakes_enabled: value.prebakes_enabled, - }) - } -} - -#[operation(name = "cluster-datacenter-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let datacenters = ctx - .cache() - .fetch_all_proto("cluster.datacenters", datacenter_ids, { - let ctx = ctx.base(); - move |mut cache, datacenter_ids| { - let ctx = ctx.clone(); - async move { - let dcs = get_dcs(ctx, datacenter_ids).await?; - for dc in dcs { - let dc_id = unwrap!(dc.datacenter_id).as_uuid(); - cache.resolve(&dc_id, dc); - } - - Ok(cache) - } - } - }) - .await?; - - Ok(cluster::datacenter_get::Response { datacenters }) -} - -async fn get_dcs( - ctx: OperationContext<()>, - datacenter_ids: Vec, -) -> GlobalResult> { - let configs = sql_fetch_all!( - [ctx, Datacenter] - " - SELECT - datacenter_id, - cluster_id, - name_id, - display_name, - provider, - provider_datacenter_id, - provider_api_token, - pools, - build_delivery_method, - prebakes_enabled, - create_ts - FROM db_cluster.datacenters - WHERE datacenter_id = ANY($1) - ", - datacenter_ids, - ) - .await?; - - let datacenters = configs - .into_iter() - .map(TryInto::try_into) - .collect::>>()?; - - Ok(datacenters) -} diff --git a/svc/pkg/cluster/ops/datacenter-list/Cargo.toml b/svc/pkg/cluster/ops/datacenter-list/Cargo.toml deleted file mode 100644 index 9d8912e10..000000000 --- a/svc/pkg/cluster/ops/datacenter-list/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-datacenter-list" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-list/Service.toml b/svc/pkg/cluster/ops/datacenter-list/Service.toml deleted file mode 100644 index ebad6361d..000000000 --- a/svc/pkg/cluster/ops/datacenter-list/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-list" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-list/src/lib.rs b/svc/pkg/cluster/ops/datacenter-list/src/lib.rs deleted file mode 100644 index 674e76562..000000000 --- a/svc/pkg/cluster/ops/datacenter-list/src/lib.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::collections::HashMap; - -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Datacenter { - cluster_id: Uuid, - datacenter_id: Uuid, -} - -#[operation(name = "cluster-datacenter-list")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let cluster_ids = ctx - .cluster_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let datacenters = sql_fetch_all!( - [ctx, Datacenter] - " - SELECT - cluster_id, 
- datacenter_id - FROM db_cluster.datacenters - WHERE cluster_id = ANY($1) - ", - &cluster_ids - ) - .await?; - - // Fill in empty clusters - let mut dcs_by_cluster_id = cluster_ids - .iter() - .map(|cluster_id| (*cluster_id, Vec::new())) - .collect::>>(); - - for dc in datacenters { - dcs_by_cluster_id - .entry(dc.cluster_id) - .or_default() - .push(dc.datacenter_id); - } - - Ok(cluster::datacenter_list::Response { - clusters: dcs_by_cluster_id - .into_iter() - .map( - |(cluster_id, datacenter_ids)| cluster::datacenter_list::response::Cluster { - cluster_id: Some(cluster_id.into()), - datacenter_ids: datacenter_ids - .into_iter() - .map(Into::into) - .collect::>(), - }, - ) - .collect::>(), - }) -} diff --git a/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml deleted file mode 100644 index b8121e63d..000000000 --- a/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cluster-datacenter-location-get" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } - -ip-info = { path = "../../../ip/ops/info" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-location-get/Service.toml b/svc/pkg/cluster/ops/datacenter-location-get/Service.toml deleted file mode 100644 index f6c3656b9..000000000 --- a/svc/pkg/cluster/ops/datacenter-location-get/Service.toml +++ /dev/null @@ -1,7 +0,0 @@ -[service] -name = "cluster-datacenter-location-get" - -[runtime] -kind = "rust" - -[operation] diff --git a/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs deleted file mode 100644 index 86ebbca11..000000000 --- a/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::net::IpAddr; - -use futures_util::{StreamExt, TryStreamExt}; -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[operation(name = "cluster-datacenter-location-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let datacenters = ctx - .cache() - .fetch_all_proto("cluster.datacenters.location", datacenter_ids, { - let ctx = ctx.base(); - move |mut cache, datacenter_ids| { - let ctx = ctx.clone(); - async move { - let dcs = query_dcs(ctx, datacenter_ids).await?; - for dc in dcs { - let dc_id = unwrap!(dc.datacenter_id).as_uuid(); - cache.resolve(&dc_id, dc); - } - - Ok(cache) - } - } - }) - .await?; - - Ok(cluster::datacenter_location_get::Response { datacenters }) -} - -async fn query_dcs( - ctx: OperationContext<()>, - datacenter_ids: Vec, -) -> GlobalResult> { - // NOTE: if there is no active GG node in a datacenter, we cannot retrieve its location - // Fetch the gg node public ip for each datacenter (there may be more than one, hence `DISTINCT`) - let server_rows = sql_fetch_all!( - [ctx, (Uuid, Option,)] - " - SELECT DISTINCT - datacenter_id, public_ip - FROM db_cluster.servers - WHERE - datacenter_id = ANY($1) AND - pool_type = $2 AND - cloud_destroy_ts IS NULL - -- For consistency - 
ORDER BY public_ip DESC - ", - &datacenter_ids, - backend::cluster::PoolType::Gg as i64, - ) - .await?; - - let coords_res = futures_util::stream::iter(server_rows) - .map(|(datacenter_id, public_ip)| { - let ctx = ctx.base(); - - async move { - if let Some(public_ip) = public_ip { - // Fetch IP info of GG node (this is cached inside `ip_info`) - let ip_info_res = op!([ctx] ip_info { - ip: public_ip.to_string(), - provider: ip::info::Provider::IpInfoIo as i32, - }) - .await?; - GlobalResult::Ok(( - datacenter_id, - ip_info_res - .ip_info - .as_ref() - .and_then(|info| info.coords.clone()), - )) - } else { - GlobalResult::Ok((datacenter_id, None)) - } - } - }) - .buffer_unordered(8) - .try_collect::>() - .await?; - - Ok(coords_res - .into_iter() - .map( - |(datacenter_id, coords)| cluster::datacenter_location_get::response::Datacenter { - datacenter_id: Some(datacenter_id.into()), - coords, - }, - ) - .collect::>()) -} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml deleted file mode 100644 index aa845fc9a..000000000 --- a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-resolve-for-name-id" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs deleted file mode 100644 index ff2e9e3ec..000000000 --- a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Datacenter { - datacenter_id: Uuid, - name_id: String, -} - -#[operation(name = "cluster-datacenter-resolve-for-name-id")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - - let datacenters = sql_fetch_all!( - [ctx, Datacenter] - " - SELECT - datacenter_id, - name_id - FROM db_cluster.datacenters - WHERE - cluster_id = $1 AND - name_id = ANY($2) - ", - &cluster_id, - &ctx.name_ids, - ) - .await? 
- .into_iter() - .map( - |dc| cluster::datacenter_resolve_for_name_id::response::Datacenter { - datacenter_id: Some(dc.datacenter_id.into()), - name_id: dc.name_id, - }, - ) - .collect::>(); - - Ok(cluster::datacenter_resolve_for_name_id::Response { datacenters }) -} diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml deleted file mode 100644 index 9b3cd70b5..000000000 --- a/svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-datacenter-tls-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/Service.toml b/svc/pkg/cluster/ops/datacenter-tls-get/Service.toml deleted file mode 100644 index a09426cfc..000000000 --- a/svc/pkg/cluster/ops/datacenter-tls-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-tls-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs deleted file mode 100644 index 3aaa8d074..000000000 --- a/svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs +++ /dev/null @@ -1,60 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct DatacenterTls { - datacenter_id: Uuid, - gg_cert_pem: Option, - gg_private_key_pem: Option, - job_cert_pem: Option, - job_private_key_pem: Option, - state: i64, - expire_ts: i64, -} - -impl From for cluster::datacenter_tls_get::response::Datacenter { - fn from(value: DatacenterTls) -> Self { - cluster::datacenter_tls_get::response::Datacenter { - datacenter_id: Some(value.datacenter_id.into()), - gg_cert_pem: value.gg_cert_pem, - gg_private_key_pem: value.gg_private_key_pem, - job_cert_pem: value.job_cert_pem, - job_private_key_pem: value.job_private_key_pem, - state: value.state as i32, - expire_ts: value.expire_ts, - } - } -} - -#[operation(name = "cluster-datacenter-tls-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let rows = sql_fetch_all!( - [ctx, DatacenterTls] - " - SELECT - datacenter_id, - gg_cert_pem, - gg_private_key_pem, - job_cert_pem, - job_private_key_pem, - state, - expire_ts - FROM db_cluster.datacenter_tls - WHERE datacenter_id = ANY($1) - ", - datacenter_ids, - ) - .await?; - - Ok(cluster::datacenter_tls_get::Response { - datacenters: rows.into_iter().map(Into::into).collect::>(), - }) -} diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml deleted file mode 100644 index 51d851aa8..000000000 --- a/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "cluster-datacenter-topology-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -lazy_static 
= "1.4" -nomad-util = { path = "../../../../../lib/nomad-util" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.nomad_client] -git = "https://github.com/rivet-gg/nomad-client" -rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/README.md b/svc/pkg/cluster/ops/datacenter-topology-get/README.md deleted file mode 100644 index b24df2068..000000000 --- a/svc/pkg/cluster/ops/datacenter-topology-get/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# datacenter-topology-get - -Fetch the nomad topology for all job servers in a datacenter diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml b/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml deleted file mode 100644 index 3c31348cf..000000000 --- a/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-topology-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/get-for-game/Cargo.toml b/svc/pkg/cluster/ops/get-for-game/Cargo.toml deleted file mode 100644 index 5ac4fb817..000000000 --- a/svc/pkg/cluster/ops/get-for-game/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cluster-get-for-game" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/get-for-game/Service.toml b/svc/pkg/cluster/ops/get-for-game/Service.toml deleted file mode 100644 index c6b8f6f34..000000000 --- a/svc/pkg/cluster/ops/get-for-game/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-get-for-game" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/get-for-game/src/lib.rs b/svc/pkg/cluster/ops/get-for-game/src/lib.rs deleted file mode 100644 index 053a09d0b..000000000 --- a/svc/pkg/cluster/ops/get-for-game/src/lib.rs +++ /dev/null @@ -1,42 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[operation(name = "cluster-get-for-game")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let game_ids = ctx - .game_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let rows = sql_fetch_optional!( - [ctx, (Uuid, Option)] - " - SELECT - g.game_id, gc.cluster_id - FROM unnest($1) AS g(game_id) - LEFT JOIN db_cluster.games AS gc - ON g.game_id = gc.game_id - ", - game_ids, - ) - .await?; - - Ok(cluster::get_for_game::Response { - games: rows - .into_iter() - .map( - |(game_id, cluster_id)| cluster::get_for_game::response::Game { - game_id: Some(game_id.into()), - cluster_id: Some( - cluster_id - .unwrap_or_else(util_cluster::default_cluster_id) - .into(), - ), - }, - ) - .collect::>(), - }) -} diff --git 
a/svc/pkg/cluster/ops/get/Cargo.toml b/svc/pkg/cluster/ops/get/Cargo.toml deleted file mode 100644 index 7f62318ae..000000000 --- a/svc/pkg/cluster/ops/get/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/get/Service.toml b/svc/pkg/cluster/ops/get/Service.toml deleted file mode 100644 index 06f53f69b..000000000 --- a/svc/pkg/cluster/ops/get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/get/src/lib.rs b/svc/pkg/cluster/ops/get/src/lib.rs deleted file mode 100644 index e4892bc35..000000000 --- a/svc/pkg/cluster/ops/get/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Cluster { - cluster_id: Uuid, - name_id: String, - owner_team_id: Option, - create_ts: i64, -} - -impl From for backend::cluster::Cluster { - fn from(value: Cluster) -> Self { - backend::cluster::Cluster { - cluster_id: Some(value.cluster_id.into()), - name_id: value.name_id, - owner_team_id: value.owner_team_id.map(Into::into), - create_ts: value.create_ts, - } - } -} - -#[operation(name = "cluster-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let crdb = ctx.crdb().await?; - let cluster_ids = ctx - .cluster_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let clusters = sql_fetch_all!( - [ctx, Cluster, &crdb] - " - SELECT - cluster_id, - name_id, - owner_team_id, - create_ts - FROM db_cluster.clusters - WHERE cluster_id = ANY($1) - ", - cluster_ids - ) - .await? 
- .into_iter() - .map(Into::into) - .collect::>(); - - Ok(cluster::get::Response { clusters }) -} diff --git a/svc/pkg/cluster/ops/list/Cargo.toml b/svc/pkg/cluster/ops/list/Cargo.toml deleted file mode 100644 index 99f578e4a..000000000 --- a/svc/pkg/cluster/ops/list/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-list" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/list/Service.toml b/svc/pkg/cluster/ops/list/Service.toml deleted file mode 100644 index a41334f3f..000000000 --- a/svc/pkg/cluster/ops/list/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-list" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/list/src/lib.rs b/svc/pkg/cluster/ops/list/src/lib.rs deleted file mode 100644 index 62cca800f..000000000 --- a/svc/pkg/cluster/ops/list/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Cluster { - cluster_id: Uuid, - name_id: String, - owner_team_id: Option, - create_ts: i64, -} - -impl From for backend::cluster::Cluster { - fn from(value: Cluster) -> Self { - backend::cluster::Cluster { - cluster_id: Some(value.cluster_id.into()), - name_id: value.name_id, - owner_team_id: value.owner_team_id.map(Into::into), - create_ts: value.create_ts, - } - } -} - -#[operation(name = "cluster-list")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let crdb = ctx.crdb().await?; - - let cluster_ids = sql_fetch_all!( - [ctx, Cluster, &crdb] - " - SELECT - cluster_id, - name_id, - owner_team_id, - create_ts - FROM db_cluster.clusters - ", - ) - .await? 
- .into_iter() - .map(|cluster| cluster.cluster_id.into()) - .collect::>(); - - Ok(cluster::list::Response { cluster_ids }) -} diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml b/svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml deleted file mode 100644 index 2fdac5937..000000000 --- a/svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-resolve-for-name-id" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/Service.toml b/svc/pkg/cluster/ops/resolve-for-name-id/Service.toml deleted file mode 100644 index e944c42f5..000000000 --- a/svc/pkg/cluster/ops/resolve-for-name-id/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-resolve-for-name-id" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs b/svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs deleted file mode 100644 index 8cafcf105..000000000 --- a/svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs +++ /dev/null @@ -1,35 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Cluster { - cluster_id: Uuid, - name_id: String, -} - -#[operation(name = "cluster-resolve-for-name-id")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let clusters = sql_fetch_all!( - [ctx, Cluster] - " - SELECT - cluster_id, - name_id - FROM db_cluster.clusters - WHERE - name_id = ANY($1) - ", - &ctx.name_ids, - ) - .await? 
- .into_iter() - .map(|dc| cluster::resolve_for_name_id::response::Cluster { - cluster_id: Some(dc.cluster_id.into()), - name_id: dc.name_id, - }) - .collect::>(); - - Ok(cluster::resolve_for_name_id::Response { clusters }) -} diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml b/svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml deleted file mode 100644 index d2e39e60b..000000000 --- a/svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-server-destroy-with-filter" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -cluster-server-list = { path = "../server-list" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml b/svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml deleted file mode 100644 index 12aba4dd1..000000000 --- a/svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml +++ /dev/null @@ -1,7 +0,0 @@ -[service] -name = "cluster-server-destroy-with-filter" - -[runtime] -kind = "rust" - -[operation] diff --git a/svc/pkg/cluster/ops/server-get/Cargo.toml b/svc/pkg/cluster/ops/server-get/Cargo.toml deleted file mode 100644 index 5861543d2..000000000 --- a/svc/pkg/cluster/ops/server-get/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cluster-server-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false -features = [ "ipnetwork" ] - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-get/Service.toml b/svc/pkg/cluster/ops/server-get/Service.toml deleted file mode 100644 index 496afacad..000000000 --- a/svc/pkg/cluster/ops/server-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-server-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-get/src/lib.rs b/svc/pkg/cluster/ops/server-get/src/lib.rs deleted file mode 100644 index ff7297cfb..000000000 --- a/svc/pkg/cluster/ops/server-get/src/lib.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - net::IpAddr, -}; - -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Server { - server_id: Uuid, - cluster_id: Uuid, - datacenter_id: Uuid, - pool_type: i64, - vlan_ip: Option, - public_ip: Option, - cloud_destroy_ts: Option, -} - -impl TryFrom for backend::cluster::Server { - type Error = GlobalError; - - fn try_from(value: Server) -> GlobalResult { - Ok(backend::cluster::Server { - server_id: Some(value.server_id.into()), - cluster_id: Some(value.cluster_id.into()), - datacenter_id: Some(value.datacenter_id.into()), - pool_type: value.pool_type.try_into()?, - vlan_ip: value.vlan_ip.map(|ip| ip.to_string()), - public_ip: 
value.public_ip.map(|ip| ip.to_string()), - cloud_destroy_ts: value.cloud_destroy_ts, - }) - } -} - -#[operation(name = "cluster-server-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let server_ids = ctx - .server_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let servers = sql_fetch_all!( - [ctx, Server] - " - SELECT - server_id, - d.cluster_id, - s.datacenter_id, - pool_type, - vlan_ip, - public_ip, - cloud_destroy_ts - FROM db_cluster.servers AS s - LEFT JOIN db_cluster.datacenters AS d ON s.datacenter_id = d.datacenter_id - WHERE server_id = ANY($1) - ", - server_ids - ) - .await?; - - Ok(cluster::server_get::Response { - servers: servers - .into_iter() - .map(TryInto::try_into) - .collect::>>()?, - }) -} diff --git a/svc/pkg/cluster/ops/server-list/Cargo.toml b/svc/pkg/cluster/ops/server-list/Cargo.toml deleted file mode 100644 index e79c7aabf..000000000 --- a/svc/pkg/cluster/ops/server-list/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-server-list" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-list/Service.toml b/svc/pkg/cluster/ops/server-list/Service.toml deleted file mode 100644 index f0def326f..000000000 --- a/svc/pkg/cluster/ops/server-list/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-server-list" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-list/src/lib.rs b/svc/pkg/cluster/ops/server-list/src/lib.rs deleted file mode 100644 index cc9b50d91..000000000 --- a/svc/pkg/cluster/ops/server-list/src/lib.rs +++ /dev/null @@ -1,123 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - net::IpAddr, -}; - -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Server { - server_id: Uuid, - cluster_id: Uuid, - datacenter_id: Uuid, - pool_type: i64, - vlan_ip: Option, - public_ip: Option, - cloud_destroy_ts: Option, -} - -impl TryFrom for backend::cluster::Server { - type Error = GlobalError; - - fn try_from(value: Server) -> GlobalResult { - Ok(backend::cluster::Server { - server_id: Some(value.server_id.into()), - cluster_id: Some(value.cluster_id.into()), - datacenter_id: Some(value.datacenter_id.into()), - pool_type: value.pool_type.try_into()?, - vlan_ip: value.vlan_ip.map(|ip| ip.to_string()), - public_ip: value.public_ip.map(|ip| ip.to_string()), - cloud_destroy_ts: value.cloud_destroy_ts, - }) - } -} - -#[operation(name = "cluster-server-list")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let filter = unwrap_ref!(ctx.filter); - - let server_ids = if filter.filter_server_ids { - Some( - filter - .server_ids - .iter() - .map(|&x| x.into()) - .collect::>(), - ) - } else { - None - }; - let cluster_ids = if filter.filter_cluster_ids { - Some( - filter - .cluster_ids - .iter() - .map(|&x| x.into()) - .collect::>(), - ) - } else { - None - }; - let datacenter_ids = if filter.filter_datacenter_ids { - Some( - filter - .datacenter_ids - .iter() - .map(|&x| x.into()) - .collect::>(), - 
) - } else { - None - }; - let pool_types = if filter.filter_pool_types { - Some(&filter.pool_types) - } else { - None - }; - let public_ips = if filter.filter_public_ips { - Some(&filter.public_ips) - } else { - None - }; - - let servers = sql_fetch_all!( - [ctx, Server] - " - SELECT - s.server_id, - d.cluster_id, - s.datacenter_id, - s.pool_type, - s.vlan_ip, - s.public_ip, - s.cloud_destroy_ts - FROM db_cluster.servers AS s - JOIN db_cluster.datacenters AS d - ON s.datacenter_id = d.datacenter_id - WHERE - ($1 OR s.cloud_destroy_ts IS NULL) - AND ($2 IS NULL OR s.server_id = ANY($2)) - AND ($3 IS NULL OR d.cluster_id = ANY($3)) - AND ($4 IS NULL OR s.datacenter_id = ANY($4)) - AND ($5 IS NULL OR s.pool_type = ANY($5)) - AND ($6 IS NULL OR s.public_ip = ANY($6::inet[])) - ", - ctx.include_destroyed, - &server_ids, - &cluster_ids, - &datacenter_ids, - &pool_types, - &public_ips, - ) - .await?; - - Ok(cluster::server_list::Response { - servers: servers - .into_iter() - .map(TryInto::try_into) - .collect::>>()?, - }) -} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml b/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml deleted file mode 100644 index 43e8d63f7..000000000 --- a/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-server-resolve-for-ip" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml b/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml deleted file mode 100644 index 0ad9fa42d..000000000 --- a/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-server-resolve-for-ip" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs b/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs deleted file mode 100644 index d28efd5a2..000000000 --- a/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::net::IpAddr; - -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Server { - server_id: Uuid, - public_ip: IpAddr, -} - -impl From for cluster::server_resolve_for_ip::response::Server { - fn from(value: Server) -> Self { - cluster::server_resolve_for_ip::response::Server { - server_id: Some(value.server_id.into()), - public_ip: value.public_ip.to_string(), - } - } -} - -#[operation(name = "cluster-server-resolve-for-ip")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let servers = sql_fetch_all!( - [ctx, Server] - " - SELECT - server_id, public_ip - FROM db_cluster.servers - WHERE - public_ip = ANY($1) AND - cloud_destroy_ts IS NULL - ", - &ctx.ips - ) - .await?; - - Ok(cluster::server_resolve_for_ip::Response { - servers: servers.into_iter().map(Into::into).collect::>(), - }) -} diff --git a/svc/pkg/cluster/src/lib.rs b/svc/pkg/cluster/src/lib.rs new file mode 100644 index 000000000..7d7a78ea9 --- /dev/null +++ b/svc/pkg/cluster/src/lib.rs @@ -0,0 +1,15 @@ +use 
chirp_workflow::prelude::*; + +pub mod ops; +pub mod types; +pub mod util; +pub mod workflows; + +pub fn registry() -> Registry { + use workflows::*; + + let mut registry = Registry::new(); + registry.register_workflow::(); + + registry +} diff --git a/svc/pkg/cluster/src/ops/datacenter/get.rs b/svc/pkg/cluster/src/ops/datacenter/get.rs new file mode 100644 index 000000000..1fbe9e650 --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/get.rs @@ -0,0 +1,129 @@ +use std::convert::{TryFrom, TryInto}; + +use chirp_workflow::prelude::*; +use rivet_operation::prelude::{proto::backend, Message}; + +use crate::types::{BuildDeliveryMethod, Datacenter, Pool, Provider}; + +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +#[derive(sqlx::FromRow)] +struct DatacenterRow { + datacenter_id: Uuid, + cluster_id: Uuid, + name_id: String, + display_name: String, + provider2: Option>, + provider: i64, + provider_datacenter_id: String, + provider_api_token: Option, + pools2: Option>>, + pools: Vec, + build_delivery_method2: Option>, + build_delivery_method: i64, + prebakes_enabled: bool, + create_ts: i64, +} + +impl TryFrom for Datacenter { + type Error = GlobalError; + + fn try_from(value: DatacenterRow) -> GlobalResult { + Ok(Datacenter { + datacenter_id: value.datacenter_id, + cluster_id: value.cluster_id, + name_id: value.name_id, + display_name: value.display_name, + create_ts: value.create_ts, + // Handle backwards compatibility + provider: if let Some(provider) = value.provider2 { + provider.0 + } else { + value.provider.try_into()? + }, + provider_datacenter_id: value.provider_datacenter_id, + provider_api_token: value.provider_api_token, + // Handle backwards compatibility + pools: if let Some(pools) = value.pools2 { + pools.0 + } else { + let proto = backend::cluster::Pools::decode(value.pools.as_slice())?.pools; + + proto + .into_iter() + .map(TryInto::try_into) + .collect::>>()? + }, + // Handle backwards compatibility + build_delivery_method: if let Some(build_delivery_method) = value.build_delivery_method2 + { + build_delivery_method.0 + } else { + value.build_delivery_method.try_into()? 
+ }, + prebakes_enabled: value.prebakes_enabled, + }) + } +} + +#[operation] +pub async fn cluster_datacenter_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let datacenters = ctx + .cache() + .fetch_all_json("cluster.datacenters", input.datacenter_ids.clone(), { + let ctx = ctx.clone(); + move |mut cache, datacenter_ids| { + let ctx = ctx.clone(); + async move { + let dcs = get_dcs(ctx, datacenter_ids).await?; + for dc in dcs { + let dc_id = dc.datacenter_id; + cache.resolve(&dc_id, dc); + } + + Ok(cache) + } + } + }) + .await?; + + Ok(Output { datacenters }) +} + +async fn get_dcs(ctx: OperationCtx, datacenter_ids: Vec) -> GlobalResult> { + let dc_rows = sql_fetch_all!( + [ctx, DatacenterRow] + " + SELECT + datacenter_id, + cluster_id, + name_id, + display_name, + provider, + provider2, + provider_datacenter_id, + provider_api_token, + pools, + pools2, + build_delivery_method, + build_delivery_method2, + prebakes_enabled, + create_ts + FROM db_cluster.datacenters + WHERE datacenter_id = ANY($1) + ", + datacenter_ids, + ) + .await?; + + dc_rows + .into_iter() + .map(TryInto::try_into) + .collect::>>() +} diff --git a/svc/pkg/cluster/src/ops/datacenter/list.rs b/svc/pkg/cluster/src/ops/datacenter/list.rs new file mode 100644 index 000000000..1ca195ac8 --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/list.rs @@ -0,0 +1,56 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; + +pub struct Input { + pub cluster_ids: Vec, +} + +pub struct Output { + pub clusters: Vec, +} + +pub struct Cluster { + pub cluster_id: Uuid, + pub datacenter_ids: Vec, +} + +#[operation] +pub async fn cluster_datacenter_list(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let rows = sql_fetch_all!( + [ctx, (Uuid, Uuid)] + " + SELECT + cluster_id, + datacenter_id + FROM db_cluster.datacenters + WHERE cluster_id = ANY($1) + ", + &input.cluster_ids, + ) + .await?; + + // Fill in empty clusters + let mut dcs_by_cluster_id = input + .cluster_ids + .iter() + .map(|cluster_id| (*cluster_id, Vec::new())) + .collect::>>(); + + for (cluster_id, datacenter_id) in rows { + dcs_by_cluster_id + .entry(cluster_id) + .or_default() + .push(datacenter_id); + } + + Ok(Output { + clusters: dcs_by_cluster_id + .into_iter() + .map(|(cluster_id, datacenter_ids)| Cluster { + cluster_id, + datacenter_ids, + }) + .collect::>(), + }) +} diff --git a/svc/pkg/cluster/src/ops/datacenter/location_get.rs b/svc/pkg/cluster/src/ops/datacenter/location_get.rs new file mode 100644 index 000000000..a0da752ce --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/location_get.rs @@ -0,0 +1,121 @@ +use std::net::IpAddr; + +use chirp_workflow::prelude::*; +use futures_util::{StreamExt, TryStreamExt}; +use rivet_operation::prelude::proto::backend::pkg::*; + +use crate::types::PoolType; + +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Datacenter { + datacenter_id: Uuid, + coords: Coordinates, +} + +// TODO: Move to a common types lib +#[derive(Debug, Serialize, Deserialize)] +pub struct Coordinates { + longitude: f64, + latitude: f64, +} + +#[operation] +pub async fn cluster_datacenter_location_get( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let datacenters = ctx + .cache() + .fetch_all_json( + "cluster.datacenters.location", + input.datacenter_ids.clone(), + { + let ctx = ctx.clone(); + move |mut cache, datacenter_ids| { + let ctx = ctx.clone(); + async move { + let dcs = query_dcs(ctx, 
datacenter_ids).await?; + for dc in dcs { + let dc_id = dc.datacenter_id; + cache.resolve(&dc_id, dc); + } + + Ok(cache) + } + } + }, + ) + .await?; + + Ok(Output { datacenters }) +} + +async fn query_dcs(ctx: OperationCtx, datacenter_ids: Vec) -> GlobalResult> { + // NOTE: if there is no active GG node in a datacenter, we cannot retrieve its location + // Fetch the gg node public ip for each datacenter (there may be more than one, hence `DISTINCT`) + let server_rows = sql_fetch_all!( + [ctx, (Uuid, IpAddr)] + " + SELECT DISTINCT + datacenter_id, public_ip + FROM db_cluster.servers + WHERE + datacenter_id = ANY($1) AND + pool_type2 = $2 AND + public_ip IS NOT NULL AND + cloud_destroy_ts IS NULL + -- For consistency + ORDER BY public_ip DESC + ", + &datacenter_ids, + serde_json::to_string(&PoolType::Gg)?, + ) + .await?; + + let coords_res = futures_util::stream::iter(server_rows) + .map(|(datacenter_id, public_ip)| { + let ctx = ctx.clone(); + + async move { + // Fetch IP info of GG node (this is cached inside `ip_info`) + let ip_info_res = op!([ctx] ip_info { + ip: public_ip.to_string(), + provider: ip::info::Provider::IpInfoIo as i32, + }) + .await?; + + GlobalResult::Ok(( + datacenter_id, + ip_info_res + .ip_info + .as_ref() + .and_then(|info| info.coords.as_ref()) + .map(|coords| Coordinates { + longitude: coords.longitude, + latitude: coords.latitude, + }), + )) + } + }) + .buffer_unordered(8) + .try_collect::>() + .await?; + + Ok(coords_res + .into_iter() + .filter_map(|(datacenter_id, coords)| { + coords.map(|coords| Datacenter { + datacenter_id, + coords, + }) + }) + .collect::>()) +} diff --git a/svc/pkg/cluster/src/ops/datacenter/mod.rs b/svc/pkg/cluster/src/ops/datacenter/mod.rs new file mode 100644 index 000000000..dce3767e2 --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/mod.rs @@ -0,0 +1,6 @@ +pub mod get; +pub mod list; +pub mod location_get; +pub mod resolve_for_name_id; +pub mod tls_get; +pub mod topology_get; diff --git a/svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs b/svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs new file mode 100644 index 000000000..a47d8f27e --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs @@ -0,0 +1,40 @@ +use chirp_workflow::prelude::*; + +pub struct Input { + pub cluster_id: Uuid, + pub name_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +#[derive(sqlx::FromRow)] +pub struct Datacenter { + pub datacenter_id: Uuid, + pub name_id: String, +} + +#[operation] +pub async fn cluster_datacenter_resolve_for_name_id( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let datacenters = sql_fetch_all!( + [ctx, Datacenter] + " + SELECT + datacenter_id, + name_id + FROM db_cluster.datacenters + WHERE + cluster_id = $1 AND + name_id = ANY($2) + ", + &input.cluster_id, + &input.name_ids, + ) + .await?; + + Ok(Output { datacenters }) +} diff --git a/svc/pkg/cluster/src/ops/datacenter/tls_get.rs b/svc/pkg/cluster/src/ops/datacenter/tls_get.rs new file mode 100644 index 000000000..8f28e7dbf --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/tls_get.rs @@ -0,0 +1,85 @@ +use std::convert::{TryFrom, TryInto}; + +use chirp_workflow::prelude::*; + +use crate::types::TlsState; + +#[derive(sqlx::FromRow)] +struct DatacenterTlsRow { + datacenter_id: Uuid, + gg_cert_pem: Option, + gg_private_key_pem: Option, + job_cert_pem: Option, + job_private_key_pem: Option, + state: i64, + state2: Option>, + expire_ts: i64, +} + +impl TryFrom for DatacenterTls { + type Error = GlobalError; + + fn 
try_from(value: DatacenterTlsRow) -> GlobalResult { + Ok(DatacenterTls { + datacenter_id: value.datacenter_id, + gg_cert_pem: value.gg_cert_pem, + gg_private_key_pem: value.gg_private_key_pem, + job_cert_pem: value.job_cert_pem, + job_private_key_pem: value.job_private_key_pem, + // Handle backwards compatibility + state: if let Some(state) = value.state2 { + state.0 + } else { + value.state.try_into()? + }, + expire_ts: value.expire_ts, + }) + } +} + +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +pub struct DatacenterTls { + pub datacenter_id: Uuid, + pub gg_cert_pem: Option, + pub gg_private_key_pem: Option, + pub job_cert_pem: Option, + pub job_private_key_pem: Option, + pub state: TlsState, + pub expire_ts: i64, +} + +#[operation] +pub async fn cluster_datacenter_tls_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let rows = sql_fetch_all!( + [ctx, DatacenterTlsRow] + " + SELECT + datacenter_id, + gg_cert_pem, + gg_private_key_pem, + job_cert_pem, + job_private_key_pem, + state, + state2, + expire_ts + FROM db_cluster.datacenter_tls + WHERE datacenter_id = ANY($1) + ", + &input.datacenter_ids, + ) + .await?; + + Ok(Output { + datacenters: rows + .into_iter() + .map(TryInto::try_into) + .collect::>>()?, + }) +} diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs b/svc/pkg/cluster/src/ops/datacenter/topology_get.rs similarity index 75% rename from svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs rename to svc/pkg/cluster/src/ops/datacenter/topology_get.rs index 582a11ec1..c97dc7d33 100644 --- a/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs +++ b/svc/pkg/cluster/src/ops/datacenter/topology_get.rs @@ -1,32 +1,52 @@ use std::collections::HashMap; +use chirp_workflow::prelude::*; use nomad_client::apis::{allocations_api, configuration::Configuration, nodes_api}; -use proto::backend::pkg::*; -use rivet_operation::prelude::*; lazy_static::lazy_static! 
{ static ref NOMAD_CONFIG: Configuration = nomad_util::new_config_from_env().unwrap(); } #[derive(sqlx::FromRow)] -struct Server { +struct ServerRow { server_id: Uuid, datacenter_id: Uuid, nomad_node_id: String, } -#[operation(name = "cluster-datacenter-topology-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +pub struct Datacenter { + pub datacenter_id: Uuid, + pub servers: Vec, +} + +pub struct Server { + pub server_id: Uuid, + pub node_id: String, + pub usage: Stats, + pub limits: Stats, +} +pub struct Stats { + pub cpu: u64, + pub memory: u64, + pub disk: u64, +} + +#[operation] +pub async fn cluster_datacenter_topology_get( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { let servers = sql_fetch_all!( - [ctx, Server] + [ctx, ServerRow] " SELECT server_id, datacenter_id, nomad_node_id @@ -37,7 +57,7 @@ pub async fn handle( cloud_destroy_ts IS NULL AND taint_ts IS NULL ", - &datacenter_ids, + &input.datacenter_ids, ) .await?; @@ -83,13 +103,14 @@ pub async fn handle( )?; // Preempt datacenters - let mut datacenters = datacenter_ids + let mut datacenters = input + .datacenter_ids .iter() .map(|datacenter_id| { ( *datacenter_id, - cluster::datacenter_topology_get::response::Datacenter { - datacenter_id: Some((*datacenter_id).into()), + Datacenter { + datacenter_id: *datacenter_id, servers: Vec::new(), }, ) @@ -97,7 +118,7 @@ pub async fn handle( .collect::>(); for server in servers { - let mut usage = cluster::datacenter_topology_get::response::Stats { + let mut usage = Stats { cpu: 0, memory: 0, disk: 0, @@ -146,7 +167,7 @@ pub async fn handle( format!("node not found {}", server.nomad_node_id) ); let resources = unwrap_ref!(node.node_resources); - let limits = cluster::datacenter_topology_get::response::Stats { + let limits = Stats { cpu: unwrap!(unwrap_ref!(resources.cpu).cpu_shares) as u64, memory: unwrap!(unwrap_ref!(resources.memory).memory_mb) as u64, disk: unwrap!(unwrap_ref!(resources.disk).disk_mb) as u64, @@ -154,17 +175,15 @@ pub async fn handle( let datacenter = unwrap!(datacenters.get_mut(&server.datacenter_id)); - datacenter - .servers - .push(cluster::datacenter_topology_get::response::Server { - server_id: Some(server.server_id.into()), - node_id: server.nomad_node_id, - usage: Some(usage), - limits: Some(limits), - }); + datacenter.servers.push(Server { + server_id: server.server_id, + node_id: server.nomad_node_id, + usage, + limits, + }); } - Ok(cluster::datacenter_topology_get::Response { - datacenters: datacenters.into_values().collect::>(), + Ok(Output { + datacenters: datacenters.into_values().collect(), }) } diff --git a/svc/pkg/cluster/src/ops/get.rs b/svc/pkg/cluster/src/ops/get.rs new file mode 100644 index 000000000..166b76afb --- /dev/null +++ b/svc/pkg/cluster/src/ops/get.rs @@ -0,0 +1,31 @@ +use chirp_workflow::prelude::*; + +use crate::types::Cluster; + +pub struct Input { + pub cluster_ids: Vec, +} + +pub struct Output { + pub clusters: Vec, +} + +#[operation] +pub async fn cluster_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let clusters = sql_fetch_all!( + [ctx, Cluster] + " + SELECT + cluster_id, + name_id, + owner_team_id, + create_ts + FROM db_cluster.clusters + WHERE cluster_id = ANY($1) + ", + &input.cluster_ids, + ) + .await?; + + Ok(Output { clusters }) +} diff --git 
a/svc/pkg/cluster/src/ops/get_for_game.rs b/svc/pkg/cluster/src/ops/get_for_game.rs new file mode 100644 index 000000000..355eade9b --- /dev/null +++ b/svc/pkg/cluster/src/ops/get_for_game.rs @@ -0,0 +1,40 @@ +use chirp_workflow::prelude::*; + +pub struct Input { + pub game_ids: Vec, +} + +pub struct Output { + pub games: Vec, +} + +pub struct Game { + pub game_id: Uuid, + pub cluster_id: Uuid, +} + +#[operation] +pub async fn cluster_get_for_game(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let rows = sql_fetch_optional!( + [ctx, (Uuid, Option)] + " + SELECT + g.game_id, gc.cluster_id + FROM unnest($1) AS g(game_id) + LEFT JOIN db_cluster.games AS gc + ON g.game_id = gc.game_id + ", + &input.game_ids, + ) + .await?; + + Ok(Output { + games: rows + .into_iter() + .map(|(game_id, cluster_id)| Game { + game_id, + cluster_id: cluster_id.unwrap_or_else(crate::util::default_cluster_id), + }) + .collect::>(), + }) +} diff --git a/svc/pkg/cluster/src/ops/list.rs b/svc/pkg/cluster/src/ops/list.rs new file mode 100644 index 000000000..91744c570 --- /dev/null +++ b/svc/pkg/cluster/src/ops/list.rs @@ -0,0 +1,24 @@ +use chirp_workflow::prelude::*; + +pub struct Input {} + +pub struct Output { + pub cluster_ids: Vec, +} + +#[operation] +pub async fn cluster_list(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let cluster_ids = sql_fetch_all!( + [ctx, (Uuid,)] + " + SELECT cluster_id + FROM db_cluster.clusters + ", + ) + .await? + .into_iter() + .map(|(cluster_id,)| cluster_id) + .collect::>(); + + Ok(Output { cluster_ids }) +} diff --git a/svc/pkg/cluster/src/ops/mod.rs b/svc/pkg/cluster/src/ops/mod.rs new file mode 100644 index 000000000..d69e65b3d --- /dev/null +++ b/svc/pkg/cluster/src/ops/mod.rs @@ -0,0 +1,6 @@ +pub mod datacenter; +pub mod get; +pub mod get_for_game; +pub mod list; +pub mod resolve_for_name_id; +pub mod server; diff --git a/svc/pkg/cluster/src/ops/resolve_for_name_id.rs b/svc/pkg/cluster/src/ops/resolve_for_name_id.rs new file mode 100644 index 000000000..19923d682 --- /dev/null +++ b/svc/pkg/cluster/src/ops/resolve_for_name_id.rs @@ -0,0 +1,36 @@ +use chirp_workflow::prelude::*; + +pub struct Input { + pub name_ids: Vec, +} + +pub struct Output { + pub clusters: Vec, +} + +#[derive(sqlx::FromRow)] +pub struct Cluster { + pub cluster_id: Uuid, + pub name_id: String, +} + +#[operation] +pub async fn cluster_resolve_for_name_id( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let clusters = sql_fetch_all!( + [ctx, Cluster] + " + SELECT + cluster_id, + name_id + FROM db_cluster.clusters + WHERE name_id = ANY($1) + ", + &input.name_ids, + ) + .await?; + + Ok(Output { clusters }) +} diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/src/lib.rs b/svc/pkg/cluster/src/ops/server/destroy_with_filter.rs similarity index 51% rename from svc/pkg/cluster/ops/server-destroy-with-filter/src/lib.rs rename to svc/pkg/cluster/src/ops/server/destroy_with_filter.rs index 7e2a367ca..a3a4fb635 100644 --- a/svc/pkg/cluster/ops/server-destroy-with-filter/src/lib.rs +++ b/svc/pkg/cluster/src/ops/server/destroy_with_filter.rs @@ -1,22 +1,33 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; use std::collections::HashSet; -#[operation(name = "cluster-server-destroy-with-filter")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let servers_res = op!([ctx] cluster_server_list { - filter: ctx.filter.clone(), - }) - .await?; +use chirp_workflow::prelude::*; +use rivet_operation::prelude::proto::backend::pkg::*; + +use 
crate::types::Filter; + +pub struct Input { + pub filter: Filter, +} + +pub struct Output {} + +#[operation] +pub async fn cluster_server_destroy_with_filter( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let servers_res = ctx + .op(crate::ops::server::list::Input { + filter: input.filter.clone(), + include_destroyed: false, + }) + .await?; // Flag as destroyed let server_ids = servers_res .servers .iter() - .filter_map(|x| x.server_id) - .map(|x| x.as_uuid()) + .map(|x| x.server_id) .collect::>(); sql_execute!( [ctx] @@ -31,9 +42,9 @@ pub async fn handle( .await?; // Destroy server - for server_id in &server_ids { + for server_id in server_ids { msg!([ctx] cluster::msg::server_destroy(server_id) { - server_id: Some(server_id.clone().into()), + server_id: Some(server_id.into()), force: false, }) .await?; @@ -43,8 +54,7 @@ pub async fn handle( let dc_ids = servers_res .servers .iter() - .filter_map(|x| x.datacenter_id) - .map(|x| x.as_uuid()) + .map(|x| x.datacenter_id) .collect::>(); for dc_id in dc_ids { msg!([ctx] cluster::msg::datacenter_scale(dc_id) { @@ -53,5 +63,5 @@ pub async fn handle( .await?; } - Ok(cluster::server_destroy_with_filter::Response {}) + Ok(Output {}) } diff --git a/svc/pkg/cluster/src/ops/server/get.rs b/svc/pkg/cluster/src/ops/server/get.rs new file mode 100644 index 000000000..1f23e0294 --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/get.rs @@ -0,0 +1,73 @@ +use std::{ + convert::{TryFrom, TryInto}, + net::IpAddr, +}; + +use chirp_workflow::prelude::*; + +use crate::types::{PoolType, Server}; + +pub struct Input { + pub server_ids: Vec, +} + +pub struct Output { + pub servers: Vec, +} + +#[derive(sqlx::FromRow)] +pub(crate) struct ServerRow { + server_id: Uuid, + datacenter_id: Uuid, + pool_type2: Option>, + pool_type: i64, + vlan_ip: Option, + public_ip: Option, + cloud_destroy_ts: Option, +} + +impl TryFrom for Server { + type Error = GlobalError; + + fn try_from(value: ServerRow) -> GlobalResult { + Ok(Server { + server_id: value.server_id, + datacenter_id: value.datacenter_id, + // Handle backwards compatibility + pool_type: if let Some(pool_type) = value.pool_type2 { + pool_type.0 + } else { + value.pool_type.try_into()? + }, + vlan_ip: value.vlan_ip, + public_ip: value.public_ip, + cloud_destroy_ts: value.cloud_destroy_ts, + }) + } +} + +#[operation] +pub async fn cluster_server_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, ServerRow] + " + SELECT + server_id, + datacenter_id, + pool_type, + pool_type2, + vlan_ip, + public_ip, + cloud_destroy_ts + FROM db_cluster.servers + WHERE server_id = ANY($1) + ", + &input.server_ids, + ) + .await? 
+ .into_iter() + .map(TryInto::try_into) + .collect::>>()?; + + Ok(Output { servers }) +} diff --git a/svc/pkg/cluster/src/ops/server/list.rs b/svc/pkg/cluster/src/ops/server/list.rs new file mode 100644 index 000000000..2189bb0b7 --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/list.rs @@ -0,0 +1,66 @@ +use std::{convert::TryInto, net::IpAddr}; + +use chirp_workflow::prelude::*; + +use super::get::ServerRow; +use crate::types::{Filter, Server}; + +pub struct Input { + pub filter: Filter, + pub include_destroyed: bool, +} + +pub struct Output { + pub servers: Vec, +} + +#[operation] +pub async fn cluster_server_list(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, ServerRow] + " + SELECT + s.server_id, + d.cluster_id, + s.datacenter_id, + s.pool_type, + s.vlan_ip, + s.public_ip, + s.cloud_destroy_ts + FROM db_cluster.servers AS s + JOIN db_cluster.datacenters AS d + ON s.datacenter_id = d.datacenter_id + WHERE + ($1 OR s.cloud_destroy_ts IS NULL) + AND ($2 IS NULL OR s.server_id = ANY($2)) + AND ($3 IS NULL OR s.datacenter_id = ANY($4)) + AND ($4 IS NULL OR d.cluster_id = ANY($3)) + AND ($5 IS NULL OR s.pool_type = ANY($5)) + AND ($6 IS NULL OR s.public_ip = ANY($6)) + ", + input.include_destroyed, + &input.filter.server_ids, + &input.filter.datacenter_ids, + &input.filter.cluster_ids, + input.filter.pool_types + .as_ref() + .map(|x| x.iter() + .cloned() + .map(Into::::into) + .collect::>() + ), + input.filter.public_ips + .as_ref() + .map(|x| x.iter() + .cloned() + .map(IpAddr::V4) + .collect::>() + ), + ) + .await? + .into_iter() + .map(TryInto::try_into) + .collect::>>()?; + + Ok(Output { servers }) +} diff --git a/svc/pkg/cluster/src/ops/server/mod.rs b/svc/pkg/cluster/src/ops/server/mod.rs new file mode 100644 index 000000000..c8d7718a9 --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/mod.rs @@ -0,0 +1,4 @@ +pub mod destroy_with_filter; +pub mod get; +pub mod list; +pub mod resolve_for_ip; diff --git a/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs b/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs new file mode 100644 index 000000000..b48c2fe7d --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs @@ -0,0 +1,42 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use chirp_workflow::prelude::*; + +pub struct Input { + pub ips: Vec, +} + +pub struct Output { + pub servers: Vec, +} + +#[derive(sqlx::FromRow)] +pub struct Server { + pub server_id: Uuid, + pub public_ip: IpAddr, +} + +#[operation] +pub async fn cluster_server_resolve_for_ip( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, Server] + " + SELECT + server_id, + public_ip + FROM db_cluster.servers + WHERE server_id = ANY($1) + ", + input.ips + .iter() + .cloned() + .map(IpAddr::V4) + .collect::>(), + ) + .await?; + + Ok(Output { servers }) +} diff --git a/svc/pkg/cluster/src/types.rs b/svc/pkg/cluster/src/types.rs new file mode 100644 index 000000000..21efbefa6 --- /dev/null +++ b/svc/pkg/cluster/src/types.rs @@ -0,0 +1,183 @@ +use std::{ + convert::{TryFrom, TryInto}, + net::{IpAddr, Ipv4Addr}, +}; + +use chirp_workflow::prelude::*; +use rivet_operation::prelude::proto::backend; +use serde::{Deserialize, Serialize}; + +#[derive(sqlx::FromRow)] +pub struct Cluster { + pub cluster_id: Uuid, + pub name_id: String, + pub owner_team_id: Option, + pub create_ts: i64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct Datacenter { + pub datacenter_id: Uuid, + pub cluster_id: Uuid, + pub name_id: String, + 
pub display_name: String, + pub provider: Provider, + pub provider_datacenter_id: String, + pub provider_api_token: Option, + pub pools: Vec, + pub build_delivery_method: BuildDeliveryMethod, + pub prebakes_enabled: bool, + pub create_ts: i64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub enum Provider { + Linode, +} + +// Backwards compatibility +impl TryFrom for Provider { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(Provider::Linode), + _ => bail!("unexpected Provider variant"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct Pool { + pub pool_type: PoolType, + pub hardware: Vec, + pub desired_count: u32, + pub min_count: u32, + pub max_count: u32, + pub drain_timeout: u64, +} + +// Backwards compatibility +impl TryFrom for Pool { + type Error = GlobalError; + + fn try_from(value: backend::cluster::Pool) -> GlobalResult { + Ok(Pool { + pool_type: (value.pool_type as i64).try_into()?, + hardware: value + .hardware + .iter() + .map(|h| Hardware { + provider_hardware: h.provider_hardware.clone(), + }) + .collect(), + desired_count: value.desired_count, + min_count: value.min_count, + max_count: value.max_count, + drain_timeout: value.drain_timeout, + }) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash, PartialEq)] +pub enum PoolType { + Job, + Gg, + Ats, +} + +// Backwards compatibility +impl TryFrom for PoolType { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(PoolType::Job), + 1 => Ok(PoolType::Gg), + 2 => Ok(PoolType::Ats), + _ => bail!("unexpected PoolType variant"), + } + } +} +impl From for i64 { + fn from(value: PoolType) -> i64 { + match value { + PoolType::Job => 0, + PoolType::Gg => 1, + PoolType::Ats => 2, + } + } +} + +impl std::fmt::Display for PoolType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PoolType::Job => write!(f, "job"), + PoolType::Gg => write!(f, "gg"), + PoolType::Ats => write!(f, "ats"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct Hardware { + pub provider_hardware: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub enum BuildDeliveryMethod { + TrafficServer, + S3Direct, +} + +// Backwards compatibility +impl TryFrom for BuildDeliveryMethod { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(BuildDeliveryMethod::TrafficServer), + 1 => Ok(BuildDeliveryMethod::S3Direct), + _ => bail!("unexpected BuildDeliveryMethod variant"), + } + } +} + +pub struct Server { + pub server_id: Uuid, + pub datacenter_id: Uuid, + pub pool_type: PoolType, + pub vlan_ip: Option, + pub public_ip: Option, + pub cloud_destroy_ts: Option, +} + +#[derive(Clone)] +pub struct Filter { + pub server_ids: Option>, + pub datacenter_ids: Option>, + pub cluster_ids: Option>, + pub pool_types: Option>, + pub public_ips: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum TlsState { + Creating, + Active, + Renewing, +} + +// Backwards compatibility +impl TryFrom for TlsState { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(TlsState::Creating), + 1 => Ok(TlsState::Active), + 2 => Ok(TlsState::Renewing), + _ => bail!("unexpected TlsState variant"), + } + } +} diff --git a/svc/pkg/cluster/util/src/metrics.rs b/svc/pkg/cluster/src/util/metrics.rs similarity index 100% rename from 
svc/pkg/cluster/util/src/metrics.rs rename to svc/pkg/cluster/src/util/metrics.rs diff --git a/svc/pkg/cluster/src/util/mod.rs b/svc/pkg/cluster/src/util/mod.rs new file mode 100644 index 000000000..d277920e9 --- /dev/null +++ b/svc/pkg/cluster/src/util/mod.rs @@ -0,0 +1,36 @@ +use chirp_workflow::prelude::*; + +use crate::types::PoolType; + +pub mod metrics; +pub mod test; + +// Use the hash of the server install script in the image variant so that if the install scripts are updated +// we won't be using the old image anymore +pub const INSTALL_SCRIPT_HASH: &str = include_str!(concat!(env!("OUT_DIR"), "/hash.txt")); + +// TTL of the token written to prebake images. Prebake images are renewed before the token would expire +pub const SERVER_TOKEN_TTL: i64 = util::duration::days(30 * 6); + +#[derive(thiserror::Error, Debug)] +#[error("cloudflare: {source}")] +pub(crate) struct CloudflareError { + #[from] + source: anyhow::Error, +} + +// Cluster id for provisioning servers +pub fn default_cluster_id() -> Uuid { + Uuid::nil() +} + +pub fn server_name(provider_datacenter_id: &str, pool_type: PoolType, server_id: Uuid) -> String { + let ns = util::env::namespace(); + let pool_type_str = match pool_type { + PoolType::Job => "job", + PoolType::Gg => "gg", + PoolType::Ats => "ats", + }; + + format!("{ns}-{provider_datacenter_id}-{pool_type_str}-{server_id}",) +} diff --git a/svc/pkg/cluster/util/src/test.rs b/svc/pkg/cluster/src/util/test.rs similarity index 100% rename from svc/pkg/cluster/util/src/test.rs rename to svc/pkg/cluster/src/util/test.rs diff --git a/svc/pkg/cluster/src/workflows/cluster.rs b/svc/pkg/cluster/src/workflows/cluster.rs new file mode 100644 index 000000000..65b2d97da --- /dev/null +++ b/svc/pkg/cluster/src/workflows/cluster.rs @@ -0,0 +1,167 @@ +use chirp_workflow::prelude::*; +use futures_util::FutureExt; +use serde_json::json; + +use crate::types::{BuildDeliveryMethod, Pool, Provider}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Input { + pub cluster_id: Uuid, + pub name_id: String, + pub owner_team_id: Option, +} + +#[workflow] +pub async fn cluster(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + ctx.activity(InsertDbInput { + cluster_id: input.cluster_id, + name_id: input.name_id.clone(), + owner_team_id: input.owner_team_id, + }) + .await?; + + // For use in spawned threads + let cluster_id = input.cluster_id; + + loop { + match ctx.listen::
<Main>
().await? { + Main::GameLink(sig) => { + // Run activity in a new thread + ctx.spawn(move |ctx| { + async move { + ctx.activity(GameLinkInput { + cluster_id, + game_id: sig.game_id, + }) + .await?; + + Ok(()) + } + .boxed() + }); + } + Main::DatacenterCreate(sig) => { + ctx.dispatch_tagged_workflow( + &json!({ + "datacenter_id": sig.datacenter_id, + }), + crate::workflows::datacenter::Input { + cluster_id: input.cluster_id, + datacenter_id: sig.datacenter_id, + name_id: sig.name_id, + display_name: sig.display_name, + + provider: sig.provider, + provider_datacenter_id: sig.provider_datacenter_id, + provider_api_token: sig.provider_api_token, + + pools: sig.pools, + + build_delivery_method: sig.build_delivery_method, + prebakes_enabled: sig.prebakes_enabled, + }, + ) + .await?; + } + } + } +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct InsertDbInput { + cluster_id: Uuid, + name_id: String, + owner_team_id: Option, +} + +#[activity(InsertDb)] +async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult<()> { + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.clusters ( + cluster_id, + name_id, + owner_team_id, + create_ts + ) + VALUES ($1, $2, $3, $4) + ", + input.cluster_id, + &input.name_id, + input.owner_team_id, + util::timestamp::now(), + ) + .await?; + + ctx.msg( + json!({ + "cluster_id": input.cluster_id, + }), + CreateComplete {}, + ) + .await?; + + Ok(()) +} + +#[message("cluster-create-complete")] +pub struct CreateComplete {} + +#[signal("cluster-game-link")] +pub struct GameLink { + pub game_id: Uuid, +} + +#[signal("cluster-datacenter-create")] +pub struct DatacenterCreate { + pub datacenter_id: Uuid, + pub name_id: String, + pub display_name: String, + + pub provider: Provider, + pub provider_datacenter_id: String, + pub provider_api_token: Option, + + pub pools: Vec, + + pub build_delivery_method: BuildDeliveryMethod, + pub prebakes_enabled: bool, +} +join_signal!(Main, [GameLink, DatacenterCreate]); + +#[message("cluster-game-link-complete")] +pub struct GameLinkComplete {} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GameLinkInput { + cluster_id: Uuid, + game_id: Uuid, +} + +#[activity(GameLinkActivity)] +async fn game_link(ctx: &ActivityCtx, input: &GameLinkInput) -> GlobalResult<()> { + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.games ( + game_id, + cluster_id + ) + VALUES ($1, $2) + ", + input.game_id, + input.cluster_id, + ) + .await?; + + ctx.msg( + json!({ + "cluster_id": input.cluster_id, + }), + GameLinkComplete {}, + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/src/workflows/datacenter/mod.rs b/svc/pkg/cluster/src/workflows/datacenter/mod.rs new file mode 100644 index 000000000..8b31c9e70 --- /dev/null +++ b/svc/pkg/cluster/src/workflows/datacenter/mod.rs @@ -0,0 +1,277 @@ +use chirp_workflow::prelude::*; +use futures_util::FutureExt; + +pub mod scale; +pub mod tls_issue; + +use crate::types::{BuildDeliveryMethod, Hardware, Pool, PoolType, Provider, TlsState}; + +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct Input { + pub cluster_id: Uuid, + pub datacenter_id: Uuid, + pub name_id: String, + pub display_name: String, + + pub provider: Provider, + pub provider_datacenter_id: String, + pub provider_api_token: Option, + + pub pools: Vec, + + pub build_delivery_method: BuildDeliveryMethod, + pub prebakes_enabled: bool, +} + +#[workflow] +pub(crate) async fn cluster_datacenter(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + ctx.activity(InsertDbInput { + cluster_id: 
input.cluster_id, + datacenter_id: input.datacenter_id, + name_id: input.name_id.clone(), + display_name: input.display_name.clone(), + + provider: input.provider.clone(), + provider_datacenter_id: input.provider_datacenter_id.clone(), + provider_api_token: input.provider_api_token.clone(), + + pools: input.pools.clone(), + + build_delivery_method: input.build_delivery_method.clone(), + prebakes_enabled: input.prebakes_enabled, + }) + .await?; + + // Wait for TLS issuing process + ctx.workflow(tls_issue::Input { + datacenter_id: input.datacenter_id, + renew: false, + }) + .await?; + + let datacenter_id = input.datacenter_id; + loop { + match ctx.listen::
<Main>
().await? { + Main::Update(sig) => { + ctx.activity(UpdateDbInput { + datacenter_id, + pools: sig.pools, + prebakes_enabled: sig.prebakes_enabled, + }) + .await?; + + // Scale + ctx.signal(ctx.workflow_id(), Scale {}).await?; + } + Main::Scale(_) => { + ctx.workflow(scale::Input { datacenter_id }).await?; + } + Main::TlsRenew(_) => { + // Spawn in a different thread + ctx.spawn(move |ctx| { + ctx.workflow(tls_issue::Input { + datacenter_id, + renew: true, + }) + .boxed() + }); + } + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +struct InsertDbInput { + cluster_id: Uuid, + datacenter_id: Uuid, + name_id: String, + display_name: String, + + provider: Provider, + provider_datacenter_id: String, + provider_api_token: Option, + + pools: Vec, + + build_delivery_method: BuildDeliveryMethod, + prebakes_enabled: bool, +} + +#[activity(InsertDb)] +async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult<()> { + let mut pools = input.pools.clone(); + + // Constrain the desired count + for pool in &mut pools { + pool.desired_count = pool.desired_count.max(pool.min_count).min(pool.max_count); + } + + let pools_buf = serde_json::to_string(&pools)?; + + rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { + let ctx = ctx.clone(); + let input = input.clone(); + let pools_buf = pools_buf.clone(); + + async move { + sql_execute!( + [ctx, @tx tx] + " + INSERT INTO db_cluster.datacenters ( + datacenter_id, + cluster_id, + name_id, + display_name, + provider2, + provider_datacenter_id, + provider_api_token, + pools2, + build_delivery_method2, + prebakes_enabled, + create_ts, + + -- Backwards compatibility + provider, + pools, + build_delivery_method + ) + VALUES ( + $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, + 0, b'', 0 + ) + ", + input.datacenter_id, + input.cluster_id, + &input.name_id, + &input.display_name, + serde_json::to_string(&input.provider)?, + &input.provider_datacenter_id, + &input.provider_api_token, + pools_buf, + serde_json::to_string(&input.build_delivery_method)?, + input.prebakes_enabled, + util::timestamp::now(), + ) + .await?; + + // Insert TLS record + sql_execute!( + [ctx, @tx tx] + " + INSERT INTO db_cluster.datacenter_tls ( + datacenter_id, + state2, + expire_ts, + + -- Backwards compatibility + state + ) + VALUES ($1, $2, 0, 0) + ", + input.datacenter_id, + serde_json::to_string(&TlsState::Creating)?, + ) + .await?; + + Ok(()) + } + .boxed() + }) + .await?; + + Ok(()) +} + +#[signal("cluster-datacenter-update")] +pub struct Update { + pub pools: Vec, + pub prebakes_enabled: bool, +} + +#[signal("cluster-datacenter-scale")] +pub struct Scale {} + +#[signal("cluster-datacenter-tls-renew")] +pub struct TlsRenew {} + +join_signal!(Main, [Update, Scale, TlsRenew]); + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct UpdateDbInput { + datacenter_id: Uuid, + pools: Vec, + prebakes_enabled: bool, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct PoolUpdate { + pub pool_type: PoolType, + + // Each can be optionally updated + pub hardware: Vec, + pub desired_count: Option, + pub min_count: Option, + pub max_count: Option, + pub drain_timeout: Option, +} + +#[activity(UpdateDb)] +async fn update_db(ctx: &ActivityCtx, input: &UpdateDbInput) -> GlobalResult<()> { + // Get current pools + let (pools,) = sql_fetch_one!( + [ctx, (sqlx::types::Json>,)] + " + SELECT pools2 FROM db_cluster.datacenters + WHERE datacenter_id = $1 + ", + input.datacenter_id, + ) + .await?; + let mut pools = pools.0; + + for pool in &input.pools { + let 
current_pool = unwrap!( + pools.iter_mut().find(|p| p.pool_type == pool.pool_type), + "attempting to update pool that doesn't exist in current config" + ); + + // Update pool config + if !pool.hardware.is_empty() { + current_pool.hardware.clone_from(&pool.hardware); + } + if let Some(desired_count) = pool.desired_count { + current_pool.desired_count = desired_count; + } + if let Some(min_count) = pool.min_count { + current_pool.min_count = min_count; + } + if let Some(max_count) = pool.max_count { + current_pool.max_count = max_count; + } + if let Some(drain_timeout) = pool.drain_timeout { + current_pool.drain_timeout = drain_timeout; + } + } + + sql_execute!( + [ctx] + " + UPDATE db_cluster.datacenters + SET + pools = $2, + prebakes_enabled = coalesce($3, prebakes_enabled) + WHERE datacenter_id = $1 + ", + input.datacenter_id, + serde_json::to_string(&pools)?, + input.prebakes_enabled, + ) + .await?; + + // Purge cache + ctx.cache() + .purge("cluster.datacenters", [input.datacenter_id]) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_scale.rs b/svc/pkg/cluster/src/workflows/datacenter/scale.rs similarity index 70% rename from svc/pkg/cluster/worker/src/workers/datacenter_scale.rs rename to svc/pkg/cluster/src/workflows/datacenter/scale.rs index 5a16b9484..b625605ac 100644 --- a/svc/pkg/cluster/worker/src/workers/datacenter_scale.rs +++ b/svc/pkg/cluster/src/workflows/datacenter/scale.rs @@ -1,4 +1,3 @@ -use std::convert::{TryFrom, TryInto}; // TERMINOLOGY: // // server: a non-destroyed non-tainted server @@ -8,24 +7,25 @@ use std::convert::{TryFrom, TryInto}; // draining server: a server that is currently draining, not drained // drained server: a server that is finished draining // tainted server: a tainted server + use std::{ cmp::Ordering, collections::HashMap, - future::Future, + convert::{TryFrom, TryInto}, iter::{DoubleEndedIterator, Iterator}, - pin::Pin, }; -use chirp_worker::prelude::*; +use chirp_workflow::prelude::*; use futures_util::{FutureExt, StreamExt, TryStreamExt}; -use proto::backend::{self, pkg::*}; +use serde_json::json; -type MsgFuture = Pin> + Send>>; +use crate::types::{Datacenter, PoolType, Provider}; #[derive(sqlx::FromRow)] struct ServerRow { server_id: Uuid, pool_type: i64, + pool_type2: Option>, is_installed: bool, has_nomad_node: bool, is_draining: bool, @@ -35,7 +35,7 @@ struct ServerRow { struct Server { server_id: Uuid, - pool_type: backend::cluster::PoolType, + pool_type: PoolType, is_installed: bool, has_nomad_node: bool, drain_state: DrainState, @@ -48,7 +48,12 @@ impl TryFrom for Server { fn try_from(value: ServerRow) -> GlobalResult { Ok(Server { server_id: value.server_id, - pool_type: unwrap!(backend::cluster::PoolType::from_i32(value.pool_type as i32)), + // Handle backwards compatibility + pool_type: if let Some(pool_type) = value.pool_type2 { + pool_type.0 + } else { + value.pool_type.try_into()? 
+ }, is_installed: value.is_installed, has_nomad_node: value.has_nomad_node, is_tainted: value.is_tainted, @@ -71,23 +76,135 @@ enum DrainState { struct PoolCtx { datacenter_id: Uuid, - provider: i32, - pool_type: backend::cluster::PoolType, + provider: Provider, + pool_type: PoolType, desired_count: usize, } -#[worker(name = "cluster-datacenter-scale")] -async fn worker( - ctx: &OperationContext, -) -> GlobalResult<()> { - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); +#[derive(Debug, Serialize, Deserialize)] +pub struct Input { + datacenter_id: Uuid, +} + +#[workflow] +pub async fn cluster_scale(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + let diff = ctx + .activity(CalculateDiffInput { + datacenter_id: input.datacenter_id, + }) + .await?; + + // Publish all messages + if !diff.actions.is_empty() { + tracing::info!("dispatching signals"); + + futures_util::stream::iter( + diff.actions + .into_iter() + .map(|action| action.dispatch(ctx.clone(), input.datacenter_id).boxed()), + ) + .buffer_unordered(16) + .try_collect::>() + .await?; + } + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CalculateDiffInput { + datacenter_id: Uuid, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CalculateDiffOutput { + actions: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +enum Action { + Provision { + server_id: Uuid, + pool_type: PoolType, + provider: Provider, + }, + Drain { + server_id: Uuid, + }, + Undrain { + server_id: Uuid, + }, + Destroy { + server_id: Uuid, + }, +} + +impl Action { + async fn dispatch(self, mut ctx: WorkflowCtx, datacenter_id: Uuid) -> GlobalResult<()> { + match self { + Action::Provision { + server_id, + pool_type, + provider, + } => { + ctx.dispatch_tagged_workflow( + &json!({ + "server_id": server_id, + }), + crate::workflows::server::Input { + datacenter_id, + server_id, + pool_type, + provider, + tags: Vec::new(), + }, + ) + .await?; + } + Action::Drain { server_id } => { + ctx.tagged_signal( + &json!({ + "server_id": server_id, + }), + crate::workflows::server::Drain {}, + ) + .await?; + } + Action::Undrain { server_id } => { + ctx.tagged_signal( + &json!({ + "server_id": server_id, + }), + crate::workflows::server::Undrain {}, + ) + .await?; + } + Action::Destroy { server_id } => { + ctx.tagged_signal( + &json!({ + "server_id": server_id, + }), + crate::workflows::server::Destroy {}, + ) + .await?; + } + } + Ok(()) + } +} + +#[activity(CalculateDiff)] +async fn CalculateDiff( + ctx: &ActivityCtx, + input: &CalculateDiffInput, +) -> GlobalResult { let (datacenter_res, topology_res) = tokio::try_join!( - op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id.into()], + ctx.op(crate::ops::datacenter::get::Input { + datacenter_ids: vec![input.datacenter_id], }), - op!([ctx] cluster_datacenter_topology_get { - datacenter_ids: vec![datacenter_id.into()], + ctx.op(crate::ops::datacenter::topology_get::Input { + datacenter_ids: vec![input.datacenter_id], }), )?; @@ -97,17 +214,12 @@ async fn worker( let memory_by_server = topology .servers .iter() - .map(|server| { - Ok(( - unwrap_ref!(server.server_id).as_uuid(), - unwrap_ref!(server.usage).memory, - )) - }) + .map(|server| Ok((server.server_id, server.usage.memory))) .collect::>>()?; // Run everything in a locking transaction - let msgs = rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { - let ctx = ctx.base(); + let actions = rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { + let ctx = ctx.clone(); let dc = dc.clone(); 
let memory_by_server = memory_by_server.clone(); @@ -115,32 +227,20 @@ async fn worker( }) .await?; - // Publish all messages - if !msgs.is_empty() { - tracing::info!("transaction successful, publishing messages"); - - futures_util::stream::iter(msgs) - .buffer_unordered(16) - .try_collect::>() - .await?; - } - - Ok(()) + Ok(CalculateDiffOutput { actions }) } async fn inner( - ctx: OperationContext<()>, + ctx: ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - dc: backend::cluster::Datacenter, + dc: Datacenter, memory_by_server: HashMap, -) -> GlobalResult> { - let datacenter_id = unwrap_ref!(dc.datacenter_id).as_uuid(); - +) -> GlobalResult> { let servers = sql_fetch_all!( [ctx, ServerRow, @tx tx] " SELECT - server_id, pool_type, + server_id, pool_type2, (install_complete_ts IS NOT NULL) AS is_installed, (nomad_node_id IS NOT NULL) AS has_nomad_node, (drain_ts IS NOT NULL) AS is_draining, @@ -155,7 +255,7 @@ async fn inner( ORDER BY create_ts DESC FOR UPDATE ", - datacenter_id, + dc.datacenter_id, ) .await?; @@ -170,36 +270,36 @@ async fn inner( // TODO: RVT-3732 Sort gg and ats servers by cpu usage // servers.sort_by_key - let mut msgs = Vec::new(); + let mut actions = Vec::new(); // NOTE: Can't parallelize because this is in a transaction for pool in &dc.pools { let pool_ctx = PoolCtx { - datacenter_id, - provider: dc.provider, - pool_type: unwrap!(backend::cluster::PoolType::from_i32(pool.pool_type)), + datacenter_id: dc.datacenter_id, + provider: dc.provider.clone(), + pool_type: pool.pool_type.clone(), desired_count: pool.desired_count.max(pool.min_count).min(pool.max_count) as usize, }; - scale_servers(&ctx, tx, &mut msgs, &servers, &pool_ctx).await?; + scale_servers(&ctx, tx, &mut actions, &servers, &pool_ctx).await?; match pool_ctx.pool_type { - backend::cluster::PoolType::Job => { - cleanup_tainted_job_servers(&ctx, tx, &mut msgs, &servers, &pool_ctx).await? + PoolType::Job => { + cleanup_tainted_job_servers(&ctx, tx, &mut actions, &servers, &pool_ctx).await? } - _ => cleanup_tainted_servers(&ctx, tx, &mut msgs, &servers, &pool_ctx).await?, + _ => cleanup_tainted_servers(&ctx, tx, &mut actions, &servers, &pool_ctx).await?, } } - destroy_drained_servers(&ctx, tx, &mut msgs, &servers).await?; + destroy_drained_servers(&ctx, tx, &mut actions, &servers).await?; - Ok(msgs) + Ok(actions) } async fn scale_servers( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, servers: &[Server], pctx: &PoolCtx, ) -> GlobalResult<()> { @@ -217,21 +317,21 @@ async fn scale_servers( match pctx.desired_count.cmp(&active_count) { Ordering::Less => match pctx.pool_type { - backend::cluster::PoolType::Job => { - scale_down_job_servers(ctx, tx, msgs, pctx, active_servers_in_pool, active_count) + PoolType::Job => { + scale_down_job_servers(ctx, tx, actions, pctx, active_servers_in_pool, active_count) .await? } - backend::cluster::PoolType::Gg => { - scale_down_gg_servers(ctx, tx, msgs, pctx, active_servers_in_pool, active_count) + PoolType::Gg => { + scale_down_gg_servers(ctx, tx, actions, pctx, active_servers_in_pool, active_count) .await? } - backend::cluster::PoolType::Ats => { - scale_down_ats_servers(ctx, tx, msgs, pctx, active_servers_in_pool, active_count) + PoolType::Ats => { + scale_down_ats_servers(ctx, tx, actions, pctx, active_servers_in_pool, active_count) .await? 
} }, Ordering::Greater => { - scale_up_servers(ctx, tx, msgs, pctx, servers_in_pool, active_count).await?; + scale_up_servers(ctx, tx, actions, pctx, servers_in_pool, active_count).await?; } Ordering::Equal => {} } @@ -240,9 +340,9 @@ async fn scale_servers( } async fn scale_down_job_servers<'a, I: Iterator>( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, pctx: &PoolCtx, active_servers: I, active_count: usize, @@ -269,7 +369,7 @@ async fn scale_down_job_servers<'a, I: Iterator>( .take(destroy_count) .map(|server| server.server_id); - destroy_servers(ctx, tx, msgs, destroy_candidates).await?; + destroy_servers(ctx, tx, actions, destroy_candidates).await?; } // Drain servers @@ -282,16 +382,16 @@ async fn scale_down_job_servers<'a, I: Iterator>( .take(drain_count) .map(|server| server.server_id); - drain_servers(ctx, tx, msgs, drain_candidates).await?; + drain_servers(ctx, tx, actions, drain_candidates).await?; } Ok(()) } async fn scale_down_gg_servers<'a, I: Iterator + DoubleEndedIterator + Clone>( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, pctx: &PoolCtx, active_servers: I, active_count: usize, @@ -314,7 +414,7 @@ async fn scale_down_gg_servers<'a, I: Iterator + DoubleEndedI .take(drain_count) .map(|server| server.server_id); - drain_servers(ctx, tx, msgs, drain_candidates).await?; + drain_servers(ctx, tx, actions, drain_candidates).await?; } Ok(()) @@ -324,9 +424,9 @@ async fn scale_down_ats_servers< 'a, I: Iterator + DoubleEndedIterator + Clone, >( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, pctx: &PoolCtx, active_servers: I, active_count: usize, @@ -349,16 +449,16 @@ async fn scale_down_ats_servers< .take(drain_count) .map(|server| server.server_id); - drain_servers(ctx, tx, msgs, drain_candidates).await?; + drain_servers(ctx, tx, actions, drain_candidates).await?; } Ok(()) } async fn scale_up_servers<'a, I: Iterator + Clone>( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, pctx: &PoolCtx, servers: I, active_count: usize, @@ -391,7 +491,7 @@ async fn scale_up_servers<'a, I: Iterator + Clone>( .take(undrain_count) .map(|server| server.server_id); - undrain_servers(ctx, tx, msgs, undrain_candidates).await?; + undrain_servers(ctx, tx, actions, undrain_candidates).await?; } // Create new servers @@ -399,7 +499,7 @@ async fn scale_up_servers<'a, I: Iterator + Clone>( tracing::info!(count=%provision_count, "provisioning servers"); for _ in 0..provision_count { - provision_server(ctx, tx, msgs, pctx).await?; + provision_server(ctx, tx, actions, pctx).await?; } } @@ -407,9 +507,9 @@ async fn scale_up_servers<'a, I: Iterator + Clone>( } async fn cleanup_tainted_job_servers( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, servers: &[Server], pctx: &PoolCtx, ) -> GlobalResult<()> { @@ -455,7 +555,7 @@ async fn cleanup_tainted_job_servers( destroy_servers( ctx, tx, - msgs, + actions, without_nomad_servers .iter() .take(destroy_count) @@ -477,7 +577,7 @@ async fn cleanup_tainted_job_servers( drain_servers( ctx, tx, - msgs, + actions, nomad_servers .iter() .take(drain_count) @@ -490,9 +590,9 @@ async fn cleanup_tainted_job_servers( } async fn 
cleanup_tainted_servers( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, servers: &[Server], pctx: &PoolCtx, ) -> GlobalResult<()> { @@ -532,7 +632,7 @@ async fn cleanup_tainted_servers( drain_servers( ctx, tx, - msgs, + actions, active_tainted_servers_in_pool .take(drain_count) .map(|server| server.server_id), @@ -545,9 +645,9 @@ async fn cleanup_tainted_servers( // Destroys all drained servers (including tainted drained servers) async fn destroy_drained_servers( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, servers: &[Server], ) -> GlobalResult<()> { let drained_server_ids = servers @@ -562,13 +662,13 @@ async fn destroy_drained_servers( tracing::info!(count=%drained_server_ids.len(), "destroying drained servers"); - destroy_servers(ctx, tx, msgs, drained_server_ids.into_iter()).await + destroy_servers(ctx, tx, actions, drained_server_ids.into_iter()).await } async fn drain_servers + Clone>( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, server_ids: I, ) -> GlobalResult<()> { tracing::info!(count=%server_ids.clone().count(), "draining servers"); @@ -586,26 +686,15 @@ async fn drain_servers + Clone>( ) .await?; - msgs.extend(server_ids.map(|server_id| { - let ctx = ctx.base(); - async move { - tracing::info!(%server_id, "draining server"); - - msg!([ctx] cluster::msg::server_drain(server_id) { - server_id: Some(server_id.into()), - }) - .await - } - .boxed() - })); + actions.extend(server_ids.map(|server_id| Action::Drain { server_id })); Ok(()) } async fn undrain_servers + Clone>( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, server_ids: I, ) -> GlobalResult<()> { tracing::info!(count=%server_ids.clone().count(), "undraining servers"); @@ -622,26 +711,15 @@ async fn undrain_servers + Clone>( ) .await?; - msgs.extend(server_ids.map(|server_id| { - let ctx = ctx.base(); - async move { - tracing::info!(%server_id, "undraining server"); - - msg!([ctx] cluster::msg::server_undrain(server_id) { - server_id: Some(server_id.into()), - }) - .await - } - .boxed() - })); + actions.extend(server_ids.map(|server_id| Action::Undrain { server_id })); Ok(()) } async fn provision_server( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, pctx: &PoolCtx, ) -> GlobalResult<()> { let server_id = Uuid::new_v4(); @@ -653,45 +731,34 @@ async fn provision_server( INSERT INTO db_cluster.servers ( server_id, datacenter_id, - pool_type, - create_ts + pool_type2, + create_ts, + + -- Backwards compatibility + pool_type ) - VALUES ($1, $2, $3, $4) + VALUES ($1, $2, $3, $4, 0) ", server_id, pctx.datacenter_id, - pctx.pool_type as i64, + serde_json::to_string(&pctx.pool_type)?, util::timestamp::now(), ) .await?; - let ctx = ctx.base(); - let datacenter_id = pctx.datacenter_id; - let provider = pctx.provider; - let pool_type = pctx.pool_type; - - msgs.push( - async move { - tracing::info!(%server_id, "provisioning server"); - msg!([ctx] cluster::msg::server_provision(server_id) { - datacenter_id: Some(datacenter_id.into()), - server_id: Some(server_id.into()), - pool_type: pool_type as i32, - provider: provider, - tags: Vec::new(), - }) - .await - } - .boxed(), - ); + 
actions.push(Action::Provision { + server_id, + pool_type: pctx.pool_type.clone(), + provider: pctx.provider.clone(), + }); Ok(()) } async fn destroy_servers + Clone>( - ctx: &OperationContext<()>, + ctx: &ActivityCtx, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - msgs: &mut Vec, + actions: &mut Vec, server_ids: I, ) -> GlobalResult<()> { tracing::info!(count=%server_ids.clone().count(), "destroying servers"); @@ -709,19 +776,7 @@ async fn destroy_servers + Clone>( ) .await?; - msgs.extend(server_ids.map(|server_id| { - let ctx = ctx.base(); - async move { - tracing::info!(%server_id, "destroying server"); - - msg!([ctx] cluster::msg::server_destroy(server_id) { - server_id: Some(server_id.into()), - force: false, - }) - .await - } - .boxed() - })); + actions.extend(server_ids.map(|server_id| Action::Destroy { server_id })); Ok(()) } diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_tls_issue.rs b/svc/pkg/cluster/src/workflows/datacenter/tls_issue.rs similarity index 71% rename from svc/pkg/cluster/worker/src/workers/datacenter_tls_issue.rs rename to svc/pkg/cluster/src/workflows/datacenter/tls_issue.rs index 91b28afa7..a9dd35748 100644 --- a/svc/pkg/cluster/worker/src/workers/datacenter_tls_issue.rs +++ b/svc/pkg/cluster/src/workflows/datacenter/tls_issue.rs @@ -1,12 +1,11 @@ use acme_lib::{ create_p384_key, persist::{MemoryPersist, Persist, PersistKey, PersistKind}, - Account, Certificate, Directory, DirectoryUrl, + Account, Directory, DirectoryUrl, }; -use chirp_worker::prelude::*; +use chirp_workflow::prelude::*; use cloudflare::{endpoints as cf, framework as cf_framework, framework::async_api::ApiClient}; use futures_util::StreamExt; -use proto::backend::{self, pkg::*}; use tokio::task; use trust_dns_resolver::{ config::{ResolverConfig, ResolverOpts}, @@ -14,30 +13,22 @@ use trust_dns_resolver::{ TokioAsyncResolver, }; -use crate::util::CloudflareError; +use crate::{types::TlsState, util::CloudflareError}; const ENCRYPT_EMAIL: &str = "letsencrypt@rivet.gg"; -#[worker(name = "cluster-datacenter-tls-issue", timeout = 300)] -async fn worker( - ctx: &OperationContext, -) -> GlobalResult<()> { - tracing::warn!("temp disabled"); - return Ok(()); - - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); - - // Create CF client - let cf_token = util::env::read_secret(&["cloudflare", "terraform", "auth_token"]).await?; - let client = cf_framework::async_api::Client::new( - cf_framework::auth::Credentials::UserAuthToken { token: cf_token }, - Default::default(), - cf_framework::Environment::Production, - ) - .map_err(CloudflareError::from)?; +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct Input { + pub datacenter_id: Uuid, + pub renew: bool, +} - // Fetch ACME account registration - let account = acme_account().await?; +#[workflow] +pub(crate) async fn cluster_datacenter_tls_issue( + ctx: &mut WorkflowCtx, + input: &Input, +) -> GlobalResult<()> { + let datacenter_id = input.datacenter_id; let base_zone_id = unwrap!( util::env::cloudflare::zone::main::id(), @@ -47,104 +38,68 @@ async fn worker( let domain_main = unwrap!(util::env::domain_main(), "dns not enabled"); let domain_job = unwrap!(util::env::domain_job(), "dns not enabled"); - // NOTE: We don't use try_join because these run in parallel, the dns record needs to be deleted for each - // order upon failure - let (gg_cert, job_cert) = tokio::join!( - order( - &client, - ctx.renew, - base_zone_id, - &account, - domain_main, - vec![format!("*.{datacenter_id}.{domain_main}")], - ), - order( - &client, - 
ctx.renew, - job_zone_id, - &account, - domain_job, - vec![ - format!("*.lobby.{datacenter_id}.{domain_job}"), - format!("*.{datacenter_id}.{domain_job}"), - ], - ), - ); - let (gg_cert, job_cert) = (gg_cert?, job_cert?); + let (gg_cert, job_cert) = ctx + .join(( + OrderInput { + renew: input.renew, + zone_id: base_zone_id.to_string(), + common_name: domain_main.to_string(), + subject_alternative_names: vec![format!("*.{datacenter_id}.{domain_main}")], + }, + OrderInput { + renew: input.renew, + zone_id: job_zone_id.to_string(), + common_name: domain_job.to_string(), + subject_alternative_names: vec![ + format!("*.lobby.{datacenter_id}.{domain_job}"), + format!("*.{datacenter_id}.{domain_job}"), + ], + }, + )) + .await?; - sql_execute!( - [ctx] - " - UPDATE db_cluster.datacenter_tls - SET - gg_cert_pem = $2, - gg_private_key_pem = $3, - job_cert_pem = $4, - job_private_key_pem = $5, - state = $6, - expire_ts = $7 - WHERE datacenter_id = $1 - ", - datacenter_id, - gg_cert.certificate(), - gg_cert.private_key(), - job_cert.certificate(), - job_cert.private_key(), - backend::cluster::TlsState::Active as i64, - util::timestamp::now() + util::duration::days(gg_cert.valid_days_left()), - ) + ctx.activity(InsertDbInput { + datacenter_id: input.datacenter_id, + gg_cert: gg_cert.cert, + gg_private_key: gg_cert.private_key, + job_cert: job_cert.cert, + job_private_key: job_cert.private_key, + expire_ts: gg_cert.expire_ts, + }) .await?; Ok(()) } -async fn acme_account() -> GlobalResult> { - let url = match util::env::var("TLS_ACME_DIRECTORY")?.as_str() { - "lets_encrypt" => DirectoryUrl::LetsEncrypt, - "lets_encrypt_staging" => DirectoryUrl::LetsEncryptStaging, - x => bail!(format!("unknown ACME directory: {x}")), - }; - - let persist = MemoryPersist::new(); - - // Write account private key (from terraform) to persistence - let pem_key = PersistKey::new( - ENCRYPT_EMAIL, - PersistKind::AccountPrivateKey, - "acme_account", - ); - let pem = util::env::var("TLS_ACME_ACCOUNT_PRIVATE_KEY_PEM")?; - persist.put(&pem_key, pem.as_bytes())?; +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +struct OrderInput { + renew: bool, + zone_id: String, + common_name: String, + subject_alternative_names: Vec, +} - // Get ACME account info - let acc = tokio::task::spawn_blocking(move || { - // Initialize ACME directory - let dir = Directory::from_url(persist, url)?; +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +struct OrderOutput { + cert: String, + private_key: String, + expire_ts: i64, +} - tracing::info!("fetching account"); - dir.account(ENCRYPT_EMAIL) - }) - .await??; +#[activity(Order)] +async fn order(ctx: &ActivityCtx, input: &OrderInput) -> GlobalResult { + let client = cf_client().await?; - Ok(acc) -} + // Fetch ACME account registration + let account = acme_account().await?; -// TODO: This function contains both blocking calls that cannot be shared between threads and async calls. -// Maybe theres a way to defer the blocking calls somehow -async fn order( - client: &cf_framework::async_api::Client, - renew: bool, - zone_id: &str, - account: &Account
<MemoryPersist>
, - common_name: &str, - subject_alternative_names: Vec, -) -> GlobalResult { - tracing::info!(cn=%common_name, "creating order"); + tracing::info!(cn=%input.common_name, "creating order"); let mut order = task::block_in_place(|| { account.new_order( - common_name, - &subject_alternative_names + &input.common_name, + &input + .subject_alternative_names .iter() .map(|s| s.as_str()) .collect::>(), @@ -153,11 +108,15 @@ async fn order( // When not renewing, if the ownership of the domain(s) have already been authorized in a previous order // we might be able to skip validation. The ACME API provider decides. - let order_csr = if let Some(order_csr) = renew.then(|| order.confirm_validations()).flatten() { + let order_csr = if let Some(order_csr) = + input.renew.then(|| order.confirm_validations()).flatten() + { order_csr } else { + let client = &client; + loop { - tracing::info!(%common_name, "fetching authorizations"); + tracing::info!(cn=%input.common_name, "fetching authorizations"); let auths = task::block_in_place(|| order.authorizations())?; // Run authorizations in parallel @@ -168,7 +127,7 @@ async fn order( let hostname = format!("_acme-challenge.{}", auth.api_auth().identifier.value); let dns_record_id = - create_dns_record(client, zone_id, &hostname, &proof).await?; + create_dns_record(client, &input.zone_id, &hostname, &proof).await?; let try_block = async { // Wait for DNS to propagate @@ -182,7 +141,7 @@ async fn order( .await; // Delete regardless of success of the above try block - delete_dns_record(client, zone_id, &dns_record_id).await?; + delete_dns_record(client, &input.zone_id, &dns_record_id).await?; try_block } @@ -214,7 +173,42 @@ async fn order( tracing::info!("order finalized"); - Ok(cert) + Ok(OrderOutput { + cert: cert.certificate().to_string(), + private_key: cert.private_key().to_string(), + expire_ts: util::timestamp::now() + util::duration::days(cert.valid_days_left()), + }) +} + +async fn acme_account() -> GlobalResult> { + let url = match util::env::var("TLS_ACME_DIRECTORY")?.as_str() { + "lets_encrypt" => DirectoryUrl::LetsEncrypt, + "lets_encrypt_staging" => DirectoryUrl::LetsEncryptStaging, + x => bail!(format!("unknown ACME directory: {x}")), + }; + + let persist = MemoryPersist::new(); + + // Write account private key (from terraform) to persistence + let pem_key = PersistKey::new( + ENCRYPT_EMAIL, + PersistKind::AccountPrivateKey, + "acme_account", + ); + let pem = util::env::var("TLS_ACME_ACCOUNT_PRIVATE_KEY_PEM")?; + persist.put(&pem_key, pem.as_bytes())?; + + // Get ACME account info + let acc = tokio::task::spawn_blocking(move || { + // Initialize ACME directory + let dir = Directory::from_url(persist, url)?; + + tracing::info!("fetching account"); + dir.account(ENCRYPT_EMAIL) + }) + .await??; + + Ok(acc) } async fn create_dns_record( @@ -328,3 +322,54 @@ async fn poll_txt_dns(hostname: &str, content: &str) -> GlobalResult<()> { bail!("dns not resolved"); } + +async fn cf_client() -> GlobalResult { + // Create CF client + let cf_token = util::env::read_secret(&["cloudflare", "terraform", "auth_token"]).await?; + let client = cf_framework::async_api::Client::new( + cf_framework::auth::Credentials::UserAuthToken { token: cf_token }, + Default::default(), + cf_framework::Environment::Production, + ) + .map_err(CloudflareError::from)?; + + Ok(client) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +struct InsertDbInput { + datacenter_id: Uuid, + gg_cert: String, + gg_private_key: String, + job_cert: String, + job_private_key: String, + 
expire_ts: i64, +} + +#[activity(InsertDb)] +async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult<()> { + sql_execute!( + [ctx] + " + UPDATE db_cluster.datacenter_tls + SET + gg_cert_pem = $2, + gg_private_key_pem = $3, + job_cert_pem = $4, + job_private_key_pem = $5, + state2 = $6, + expire_ts = $7 + WHERE datacenter_id = $1 + ", + input.datacenter_id, + &input.gg_cert, + &input.gg_private_key, + &input.job_cert, + &input.job_private_key, + serde_json::to_string(&TlsState::Active)?, + input.expire_ts, + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/src/workflows/mod.rs b/svc/pkg/cluster/src/workflows/mod.rs new file mode 100644 index 000000000..6fa0aadfe --- /dev/null +++ b/svc/pkg/cluster/src/workflows/mod.rs @@ -0,0 +1,3 @@ +pub mod cluster; +pub mod datacenter; +pub mod server; diff --git a/svc/pkg/cluster/worker/src/workers/server_destroy.rs b/svc/pkg/cluster/src/workflows/server/destroy.rs similarity index 99% rename from svc/pkg/cluster/worker/src/workers/server_destroy.rs rename to svc/pkg/cluster/src/workflows/server/destroy.rs index fabf823f4..999670e23 100644 --- a/svc/pkg/cluster/worker/src/workers/server_destroy.rs +++ b/svc/pkg/cluster/src/workflows/server/destroy.rs @@ -11,7 +11,6 @@ struct Server { #[worker(name = "cluster-server-destroy")] async fn worker(ctx: &OperationContext) -> GlobalResult<()> { - // TODO: RVTEE-75 rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| inner(ctx.clone(), tx).boxed()).await?; Ok(()) diff --git a/svc/pkg/cluster/worker/src/workers/server_dns_create.rs b/svc/pkg/cluster/src/workflows/server/dns_create.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_dns_create.rs rename to svc/pkg/cluster/src/workflows/server/dns_create.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_dns_delete.rs b/svc/pkg/cluster/src/workflows/server/dns_delete.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_dns_delete.rs rename to svc/pkg/cluster/src/workflows/server/dns_delete.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_drain.rs b/svc/pkg/cluster/src/workflows/server/drain.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_drain.rs rename to svc/pkg/cluster/src/workflows/server/drain.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/mod.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/mod.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/mod.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/mod.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/nomad.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/nomad.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/nomad.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/nomad.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/ok_server.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/ok_server.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/ok_server.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/ok_server.rs diff --git 
a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/rivet.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/rivet.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/rivet.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/rivet.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/s3.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/s3.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/s3.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/s3.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traefik.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traefik.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traefik.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traefik.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traffic_server.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traffic_server.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traffic_server.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traffic_server.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/vector.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/vector.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/vector.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/vector.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/cni_plugins.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/cni_plugins.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/cni_plugins.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/cni_plugins.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/docker.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/docker.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/node_exporter.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/node_exporter.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_configure.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh rename to 
svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_configure.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_install.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_install.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/ok_server.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/ok_server.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/ok_server.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/ok_server.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_create_hook.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_create_hook.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_info.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_info.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_tls.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_tls.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_tls.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_tls.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/sysctl.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/sysctl.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/sysctl.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/sysctl.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik_instance.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik_instance.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik_instance.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik_instance.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/cache.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/cache.config similarity index 100% rename 
from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/cache.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/cache.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/hosting.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/hosting.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/hosting.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/hosting.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ip_allow.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ip_allow.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ip_allow.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ip_allow.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/logging.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/logging.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/logging.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/logging.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/parent.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/parent.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/parent.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/parent.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/plugin.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/plugin.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/plugin.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/plugin.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/records.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/records.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/records.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/records.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/sni.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/sni.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/sni.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/sni.yaml diff --git 
a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/socks.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/socks.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/socks.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/socks.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/splitdns.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/splitdns.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/splitdns.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/splitdns.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ssl_multicert.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ssl_multicert.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ssl_multicert.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ssl_multicert.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strategies.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strategies.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strategies.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strategies.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strip_headers.lua b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strip_headers.lua similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strip_headers.lua rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strip_headers.lua diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/trafficserver-release b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/trafficserver-release similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/trafficserver-release rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/trafficserver-release diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/volume.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/volume.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/volume.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/volume.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh 
b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_configure.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_configure.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_install.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_install.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_configure.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_configure.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_install.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_install.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/mod.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/mod.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/mod.rs b/svc/pkg/cluster/src/workflows/server/install/mod.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/mod.rs rename to svc/pkg/cluster/src/workflows/server/install/mod.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install_complete.rs b/svc/pkg/cluster/src/workflows/server/install_complete.rs similarity index 95% rename from svc/pkg/cluster/worker/src/workers/server_install_complete.rs rename to svc/pkg/cluster/src/workflows/server/install_complete.rs index 138bb73ce..d1e375ec8 100644 --- a/svc/pkg/cluster/worker/src/workers/server_install_complete.rs +++ b/svc/pkg/cluster/src/workflows/server/install_complete.rs @@ -1,3 +1,5 @@ +// TODO: Redo prebake system + use chirp_worker::prelude::*; use proto::backend::{self, pkg::*}; diff --git a/svc/pkg/cluster/src/workflows/server/mod.rs b/svc/pkg/cluster/src/workflows/server/mod.rs new file mode 100644 index 000000000..9451de352 --- /dev/null +++ b/svc/pkg/cluster/src/workflows/server/mod.rs @@ -0,0 +1,458 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use chirp_workflow::prelude::*; +use rand::Rng; + +use crate::{ + types::{Datacenter, PoolType, Provider}, + util::metrics, +}; + +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct Input { + pub datacenter_id: Uuid, + pub server_id: Uuid, + pub pool_type: PoolType, + pub provider: Provider, + pub tags: Vec, +} + +#[workflow] +pub async fn cluster_server(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + let dc = ctx + .activity(GetDcInput { + datacenter_id: 
input.datacenter_id, + }) + .await?; + + let pool = unwrap!( + dc.pools.iter().find(|p| p.pool_type == input.pool_type), + "datacenter does not have this type of pool configured" + ); + + // Get a new vlan ip + let vlan_ip = ctx + .activity(GetVlanIpInput { + datacenter_id: input.datacenter_id, + server_id: input.server_id, + pool_type: input.pool_type, + }) + .await?; + + // Iterate through list of hardware and attempt to schedule a server. Goes to the next + // hardware if an error happens during provisioning + let mut hardware_list = pool.hardware.iter(); + let provision_res = loop { + // List exhausted + let Some(hardware) = hardware_list.next() else { + break None; + }; + + tracing::info!( + "attempting to provision hardware: {}", + hardware.provider_hardware, + ); + + match input.provider { + Provider::Linode => { + // TODO: + let custom_image = None; + + ctx.dispatch_workflow(linode::workflows::server::Input { + server_id: input.server_id, + provider_datacenter_id: dc.provider_datacenter_id.clone(), + custom_image, + api_token: dc.provider_api_token.clone(), + hardware: hardware.provider_hardware, + firewall_preset: match input.pool_type { + PoolType::Job => linode::types::FirewallPreset::Job, + PoolType::Gg => linode::types::FirewallPreset::Gg, + PoolType::Ats => linode::types::FirewallPreset::Ats, + }, + vlan_ip, + tags: input.tags.clone(), + }) + .await?; + + match ctx.listen::().await? { + Linode::ProvisionComplete(sig) => { + break Some(ProvisionResponse { + provider_server_id: sig.linode_id.to_string(), + provider_hardware: hardware.provider_hardware, + public_ip: sig.public_ip, + already_installed: custom_image.is_some(), + }); + } + Linode::ProvisionFailed(sig) => { + tracing::error!( + err=%sig.err, + server_id=?input.server_id, + "failed to provision server" + ); + } + } + } + } + }; + + if let Some(provision_res) = provision_res { + let already_installed = provision_res.already_installed; + + let db_res = ctx + .activity(UpdateDbInput { + server_id: input.server_id, + pool_type: input.pool_type.clone(), + cluster_id: dc.cluster_id, + datacenter_id: dc.datacenter_id, + provider_datacenter_id: dc.provider_datacenter_id.clone(), + datacenter_name_id: dc.name_id.clone(), + provision_res, + }) + .await?; + + if already_installed { + // Create DNS record because the server is already installed + if let PoolType::Gg = input.pool_type { + // TODO: MOVE + // // Source of truth record + // sql_execute!( + // [ctx] + // " + // INSERT INTO db_cluster.servers_cloudflare (server_id) + // VALUES ($1) + // ", + // server_id, + // ) + // .await?; + + ctx.workflow(dns_create::Input {}).await?; + } + } + // Install components on server + else { + let request_id = Uuid::new_v4(); + + ctx.workflow(install::Input { + // public_ip: provision_res.public_ip, + // datacenter_id: input.datacenter_id, + // server_id: input.server_id, + // pool_type: input.pool_type, + // provider: input.provider, + // initialize_immediately: true, + }) + .await?; + } + } else { + tracing::error!( + server_id=?input.server_id, + hardware_options=?pool.hardware.len(), + "failed all attempts to provision server" + ); + bail!("failed all attempts to provision server"); + + // TODO: destroy in db + } + + let mut state = State::default(); + loop { + match state.listen(ctx).await? 
{ + Main::Drain(sig) => {} + Main::Undrain(sig) => {} + Main::Taint(sig) => {} + Main::DnsCreate(sig) => {} + Main::DnsDelete(sig) => {} + Main::Destroy(sig) => { + break; + } + } + } + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GetDcInput { + datacenter_id: Uuid, +} + +#[activity(GetDc)] +async fn get_dc(ctx: &ActivityCtx, input: &GetDcInput) -> GlobalResult { + let dcs_res = ctx + .op(crate::ops::datacenter::get::Input { + datacenter_ids: vec![input.datacenter_id], + }) + .await?; + let dc = unwrap!(dcs_res.datacenters.into_iter().next()); + + Ok(dc) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GetVlanIpInput { + datacenter_id: Uuid, + server_id: Uuid, + pool_type: PoolType, +} + +#[activity(GetVlanIp)] +async fn get_vlan_ip(ctx: &ActivityCtx, input: &GetVlanIpInput) -> GlobalResult { + // Find next available vlan index + let mut vlan_addr_range = match input.pool_type { + PoolType::Job => util::net::job::vlan_addr_range(), + PoolType::Gg => util::net::gg::vlan_addr_range(), + PoolType::Ats => util::net::ats::vlan_addr_range(), + }; + let max_idx = vlan_addr_range.count() as i64; + + let (network_idx,) = sql_fetch_one!( + [ctx, (i64,)] + " + WITH + get_next_network_idx AS ( + SELECT mod(idx + $1, $2) AS idx + FROM generate_series(0, $2) AS s(idx) + WHERE NOT EXISTS ( + SELECT 1 + FROM db_cluster.servers + WHERE + pool_type2 = $3 AND + -- Technically this should check all servers where their datacenter's provider and + -- provider_datacenter_id are the same because VLAN is separated by irl datacenter + -- but this is good enough + datacenter_id = $4 AND + network_idx = mod(idx + $1, $2) AND + cloud_destroy_ts IS NULL + ) + LIMIT 1 + ), + update_network_idx AS ( + UPDATE db_cluster.servers + SET network_idx = (SELECT idx FROM get_next_network_idx) + WHERE server_id = $5 + RETURNING 1 + ) + SELECT idx FROM get_next_network_idx + ", + // Choose a random index to start from for better index spread + rand::thread_rng().gen_range(0i64..max_idx), + max_idx, + serde_json::to_string(&input.pool_type)?, + input.datacenter_id, + input.server_id, + ) + .await?; + + let vlan_ip = unwrap!(vlan_addr_range.nth(network_idx as usize)); + + // Write vlan ip + sql_execute!( + [ctx] + " + UPDATE db_cluster.servers + SET vlan_ip = $2 + WHERE server_id = $1 + ", + input.server_id, + IpAddr::V4(vlan_ip), + ) + .await?; + + Ok(vlan_ip) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ProvisionResponse { + provider_server_id: String, + provider_hardware: String, + public_ip: Ipv4Addr, + already_installed: bool, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct UpdateDbInput { + server_id: Uuid, + pool_type: PoolType, + cluster_id: Uuid, + datacenter_id: Uuid, + provider_datacenter_id: String, + datacenter_name_id: String, + provision_res: ProvisionResponse, +} + +#[activity(UpdateDb)] +async fn update_db(ctx: &ActivityCtx, input: &UpdateDbInput) -> GlobalResult<()> { + let provision_complete_ts = util::timestamp::now(); + + let (create_ts,) = sql_fetch_one!( + [ctx, (i64,)] + " + UPDATE db_cluster.servers + SET + provider_server_id = $2, + provider_hardware = $3, + public_ip = $4, + provision_complete_ts = $5, + install_complete_ts = $6 + WHERE server_id = $1 + RETURNING create_ts + ", + input.server_id, + &input.provision_res.provider_server_id, + &input.provision_res.provider_hardware, + IpAddr::V4(input.provision_res.public_ip), + provision_complete_ts, + if input.provision_res.already_installed { + Some(provision_complete_ts) + } else { + None + 
}, + ) + .await?; + + insert_metrics( + input.cluster_id, + input.datacenter_id, + &input.provider_datacenter_id, + &input.datacenter_name_id, + &input.pool_type, + provision_complete_ts, + create_ts, + ) + .await; + + Ok(()) +} + +/// Finite state machine for handling server updates. +struct State { + draining: bool, + has_dns: bool, + is_tainted: bool, +} + +impl State { + /* ==== BINARY CONDITION DECOMPOSITION ==== + + // state + drain dns taint // available actions + 0 0 0 // destroy, drain, taint, dns create + 0 0 1 // destroy, drain + 0 1 0 // destroy, drain, taint, dns delete + 0 1 1 // destroy, drain, dns delete + 1 0 0 // destroy, undrain, taint + 1 0 1 // destroy + 1 1 0 // destroy, undrain, taint, dns delete + 1 1 1 // destroy + + destroy // always + drain // if !drain + undrain // if drain && !taint + taint // if !taint + dns create // if !dns && !drain && !taint + dns delete // if dns && !(drain && taint) + */ + async fn listen(&mut self, ctx: &mut WorkflowCtx) -> WorkflowResult
<Main>
{ + // Determine which signals to listen to + let mut signals = Vec::with_capacity(6); + + signals.push(Destroy::NAME); + + if !self.draining { + signals.push(Drain::NAME); + } else if !self.is_tainted { + signals.push(Undrain::NAME); + } + + if !self.is_tainted { + signals.push(Taint::NAME); + } + + if !self.has_dns && !self.draining && !self.is_tainted { + signals.push(DnsCreate::NAME); + } + + if self.has_dns && !(self.draining && self.is_tainted) { + signals.push(DnsDelete::NAME); + } + + let row = ctx.listen_any(&signals).await?; + let signal = Main::parse(&row.signal_name, row.body)?; + + // Update state + self.transition(&signal); + + Ok(signal) + } + + fn transition(&mut self, signal: &Main) { + match signal { + Main::Drain(_) => self.draining = true, + Main::Undrain(_) => self.draining = false, + Main::Taint(_) => self.is_tainted = true, + Main::DnsCreate(_) => self.has_dns = true, + Main::DnsDelete(_) => self.has_dns = false, + _ => {} + } + } +} + +impl Default for State { + fn default() -> Self { + State { + draining: false, + has_dns: true, + is_tainted: false, + } + } +} + +async fn insert_metrics( + cluster_id: Uuid, + datacenter_id: Uuid, + provider_datacenter_id: &str, + datacenter_name_id: &str, + pool_type: &PoolType, + provision_complete_ts: i64, + create_ts: i64, +) { + let dt = (provision_complete_ts - create_ts) as f64 / 1000.0; + + metrics::PROVISION_DURATION + .with_label_values(&[ + &cluster_id.to_string(), + &datacenter_id.to_string(), + &provider_datacenter_id, + &datacenter_name_id, + match pool_type { + PoolType::Job => "job", + PoolType::Gg => "gg", + PoolType::Ats => "ats", + }, + ]) + .observe(dt); +} + +// Listen for linode provision signals +type ProvisionComplete = linode::workflows::server::ProvisionComplete; +type ProvisionFailed = linode::workflows::server::ProvisionFailed; +join_signal!(Linode, [ProvisionComplete, ProvisionFailed]); + +#[signal("cluster-server-drain")] +pub struct Drain {} + +#[signal("cluster-server-undrain")] +pub struct Undrain {} + +#[signal("cluster-server-taint")] +pub struct Taint {} + +#[signal("cluster-server-dns-create")] +pub struct DnsCreate {} + +#[signal("cluster-server-dns-delete")] +pub struct DnsDelete {} + +#[signal("cluster-server-destroy")] +pub struct Destroy {} + +join_signal!(Main, [Drain, Undrain, Taint, DnsCreate, DnsDelete, Destroy]); diff --git a/svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs b/svc/pkg/cluster/src/workflows/server/nomad_node_drain_complete.rs similarity index 92% rename from svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs rename to svc/pkg/cluster/src/workflows/server/nomad_node_drain_complete.rs index 681009a5c..5b5dc0880 100644 --- a/svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs +++ b/svc/pkg/cluster/src/workflows/server/nomad_node_drain_complete.rs @@ -1,3 +1,5 @@ +// TODO: Convert monitor_node_drain_complete to signal + use chirp_worker::prelude::*; use proto::backend::pkg::*; diff --git a/svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs b/svc/pkg/cluster/src/workflows/server/nomad_node_registered.rs similarity index 97% rename from svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs rename to svc/pkg/cluster/src/workflows/server/nomad_node_registered.rs index bd09e974b..1047b4d0c 100644 --- a/svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs +++ b/svc/pkg/cluster/src/workflows/server/nomad_node_registered.rs @@ -1,3 +1,5 @@ +// TODO: Convert monitor_node_registered to signal + use chirp_worker::prelude::*;
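The `State::listen` helper in the new `workflows/server/mod.rs` above is driven entirely by the truth table in its `BINARY CONDITION DECOMPOSITION` comment. Below is a minimal, self-contained sketch of that selection rule, assuming a hypothetical `available_signals` helper over plain string names (stand-ins for the generated `NAME` constants); it illustrates the decomposition only and is not the workflow code itself.

```rust
/// Mirrors the drain/dns/taint truth table from the workflow's State::listen.
/// The string literals match the `#[signal("...")]` names declared in the patch.
fn available_signals(draining: bool, has_dns: bool, tainted: bool) -> Vec<&'static str> {
    let mut signals = vec!["cluster-server-destroy"]; // destroy: always

    if !draining {
        signals.push("cluster-server-drain"); // drain: if !drain
    } else if !tainted {
        signals.push("cluster-server-undrain"); // undrain: if drain && !taint
    }

    if !tainted {
        signals.push("cluster-server-taint"); // taint: if !taint
    }

    if !has_dns && !draining && !tainted {
        signals.push("cluster-server-dns-create"); // dns create: if !dns && !drain && !taint
    }

    if has_dns && !(draining && tainted) {
        signals.push("cluster-server-dns-delete"); // dns delete: if dns && !(drain && taint)
    }

    signals
}

fn main() {
    // Table row drain=0, dns=1, taint=0: destroy, drain, taint, dns delete.
    assert_eq!(
        available_signals(false, true, false),
        [
            "cluster-server-destroy",
            "cluster-server-drain",
            "cluster-server-taint",
            "cluster-server-dns-delete",
        ]
    );

    // Table row drain=1, dns=1, taint=1: only destroy remains.
    assert_eq!(available_signals(true, true, true), ["cluster-server-destroy"]);
}
```

Each signal pulled this way is fed through `transition`, so the next `listen` call recomputes the set from the updated flags.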
use proto::backend::pkg::*; use util_cluster::metrics; diff --git a/svc/pkg/cluster/worker/src/workers/server_taint.rs b/svc/pkg/cluster/src/workflows/server/taint.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_taint.rs rename to svc/pkg/cluster/src/workflows/server/taint.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_undrain.rs b/svc/pkg/cluster/src/workflows/server/undrain.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_undrain.rs rename to svc/pkg/cluster/src/workflows/server/undrain.rs diff --git a/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml b/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml index 61b2cf47d..40ea0a6ca 100644 --- a/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml +++ b/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster = { path = "../.." } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/cluster/standalone/default-update/Cargo.toml b/svc/pkg/cluster/standalone/default-update/Cargo.toml index 4fb14ac06..e8bbde600 100644 --- a/svc/pkg/cluster/standalone/default-update/Cargo.toml +++ b/svc/pkg/cluster/standalone/default-update/Cargo.toml @@ -18,11 +18,8 @@ tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } uuid = { version = "1", features = ["v4"] } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } -cluster-get = { path = "../../ops/get" } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } -cluster-datacenter-list = { path = "../../ops/datacenter-list" } +cluster = { path = "../.." } [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/standalone/fix-tls/Cargo.toml b/svc/pkg/cluster/standalone/fix-tls/Cargo.toml index 7bea53a34..2832cbe0e 100644 --- a/svc/pkg/cluster/standalone/fix-tls/Cargo.toml +++ b/svc/pkg/cluster/standalone/fix-tls/Cargo.toml @@ -33,14 +33,9 @@ serde_yaml = "0.9" ssh2 = "0.9.4" thiserror = "1.0" trust-dns-resolver = { version = "0.23.2", features = ["dns-over-native-tls"] } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } -cluster-datacenter-list = { path = "../../ops/datacenter-list" } -cluster-datacenter-topology-get = { path = "../../ops/datacenter-topology-get" } -linode-instance-type-get = { path = "../../../linode/ops/instance-type-get" } -linode-server-destroy = { path = "../../../linode/ops/server-destroy" } -linode-server-provision = { path = "../../../linode/ops/server-provision" } +cluster = { path = "../.." 
} +linode = { path = "../../../linode" } token-create = { path = "../../../token/ops/create" } [dev-dependencies] diff --git a/svc/pkg/cluster/standalone/gc/Cargo.toml b/svc/pkg/cluster/standalone/gc/Cargo.toml index 242b1b67f..c30da8cd9 100644 --- a/svc/pkg/cluster/standalone/gc/Cargo.toml +++ b/svc/pkg/cluster/standalone/gc/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster = { path = "../.." } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" @@ -25,4 +25,3 @@ default-features = false [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } diff --git a/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml b/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml index bc0073f00..fc2f121a4 100644 --- a/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml +++ b/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml @@ -15,9 +15,8 @@ rivet-runtime = { path = "../../../../../lib/runtime" } tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster = { path = "../.." } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" @@ -26,4 +25,3 @@ default-features = false [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } diff --git a/svc/pkg/cluster/worker/tests/common.rs b/svc/pkg/cluster/testsTMP/common.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/common.rs rename to svc/pkg/cluster/testsTMP/common.rs diff --git a/svc/pkg/cluster/worker/tests/create.rs b/svc/pkg/cluster/testsTMP/create.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/create.rs rename to svc/pkg/cluster/testsTMP/create.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_create.rs b/svc/pkg/cluster/testsTMP/datacenter_create.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_create.rs rename to svc/pkg/cluster/testsTMP/datacenter_create.rs diff --git a/svc/pkg/cluster/ops/datacenter-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_get.rs diff --git a/svc/pkg/cluster/ops/datacenter-list/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_list.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-list/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_list.rs diff --git a/svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_location_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_location_get.rs diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_resolve_for_name_id.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs rename to 
svc/pkg/cluster/testsTMP/datacenter_resolve_for_name_id.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_scale.rs b/svc/pkg/cluster/testsTMP/datacenter_scale.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_scale.rs rename to svc/pkg/cluster/testsTMP/datacenter_scale.rs diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_tls_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-tls-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_tls_get.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_tls_issue.rs b/svc/pkg/cluster/testsTMP/datacenter_tls_issue.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_tls_issue.rs rename to svc/pkg/cluster/testsTMP/datacenter_tls_issue.rs diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_topology_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_topology_get.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_update.rs b/svc/pkg/cluster/testsTMP/datacenter_update.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_update.rs rename to svc/pkg/cluster/testsTMP/datacenter_update.rs diff --git a/svc/pkg/cluster/worker/tests/game_link.rs b/svc/pkg/cluster/testsTMP/game_link.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/game_link.rs rename to svc/pkg/cluster/testsTMP/game_link.rs diff --git a/svc/pkg/cluster/ops/get/tests/integration.rs b/svc/pkg/cluster/testsTMP/get.rs similarity index 100% rename from svc/pkg/cluster/ops/get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/get.rs diff --git a/svc/pkg/cluster/ops/get-for-game/tests/integration.rs b/svc/pkg/cluster/testsTMP/get_for_game.rs similarity index 100% rename from svc/pkg/cluster/ops/get-for-game/tests/integration.rs rename to svc/pkg/cluster/testsTMP/get_for_game.rs diff --git a/svc/pkg/cluster/ops/list/tests/integration.rs b/svc/pkg/cluster/testsTMP/list.rs similarity index 100% rename from svc/pkg/cluster/ops/list/tests/integration.rs rename to svc/pkg/cluster/testsTMP/list.rs diff --git a/svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs b/svc/pkg/cluster/testsTMP/nomad_node_drain_complete.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs rename to svc/pkg/cluster/testsTMP/nomad_node_drain_complete.rs diff --git a/svc/pkg/cluster/worker/tests/nomad_node_registered.rs b/svc/pkg/cluster/testsTMP/nomad_node_registered.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/nomad_node_registered.rs rename to svc/pkg/cluster/testsTMP/nomad_node_registered.rs diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/tests/integration.rs b/svc/pkg/cluster/testsTMP/resolve_for_name_id.rs similarity index 100% rename from svc/pkg/cluster/ops/resolve-for-name-id/tests/integration.rs rename to svc/pkg/cluster/testsTMP/resolve_for_name_id.rs diff --git a/svc/pkg/cluster/worker/tests/server_destroy.rs b/svc/pkg/cluster/testsTMP/server_destroy.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_destroy.rs rename to svc/pkg/cluster/testsTMP/server_destroy.rs diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_destroy_with_filter.rs similarity index 100% rename from 
svc/pkg/cluster/ops/server-destroy-with-filter/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_destroy_with_filter.rs diff --git a/svc/pkg/cluster/worker/tests/server_dns_create.rs b/svc/pkg/cluster/testsTMP/server_dns_create.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_dns_create.rs rename to svc/pkg/cluster/testsTMP/server_dns_create.rs diff --git a/svc/pkg/cluster/worker/tests/server_dns_delete.rs b/svc/pkg/cluster/testsTMP/server_dns_delete.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_dns_delete.rs rename to svc/pkg/cluster/testsTMP/server_dns_delete.rs diff --git a/svc/pkg/cluster/worker/tests/server_drain.rs b/svc/pkg/cluster/testsTMP/server_drain.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_drain.rs rename to svc/pkg/cluster/testsTMP/server_drain.rs diff --git a/svc/pkg/cluster/ops/server-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_get.rs similarity index 100% rename from svc/pkg/cluster/ops/server-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_get.rs diff --git a/svc/pkg/cluster/worker/tests/server_install.rs b/svc/pkg/cluster/testsTMP/server_install.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_install.rs rename to svc/pkg/cluster/testsTMP/server_install.rs diff --git a/svc/pkg/cluster/worker/tests/server_install_complete.rs b/svc/pkg/cluster/testsTMP/server_install_complete.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_install_complete.rs rename to svc/pkg/cluster/testsTMP/server_install_complete.rs diff --git a/svc/pkg/cluster/ops/server-list/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_list.rs similarity index 100% rename from svc/pkg/cluster/ops/server-list/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_list.rs diff --git a/svc/pkg/cluster/worker/tests/server_provision.rs b/svc/pkg/cluster/testsTMP/server_provision.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_provision.rs rename to svc/pkg/cluster/testsTMP/server_provision.rs diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_resolve_for_ip.rs similarity index 100% rename from svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_resolve_for_ip.rs diff --git a/svc/pkg/cluster/worker/tests/server_taint.rs b/svc/pkg/cluster/testsTMP/server_taint.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_taint.rs rename to svc/pkg/cluster/testsTMP/server_taint.rs diff --git a/svc/pkg/cluster/worker/tests/server_undrain.rs b/svc/pkg/cluster/testsTMP/server_undrain.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_undrain.rs rename to svc/pkg/cluster/testsTMP/server_undrain.rs diff --git a/svc/pkg/cluster/util/Cargo.toml b/svc/pkg/cluster/util/Cargo.toml deleted file mode 100644 index b1065186b..000000000 --- a/svc/pkg/cluster/util/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "rivet-util-cluster" -version = "0.1.0" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -lazy_static = "1.4" -rivet-metrics = { path = "../../../../lib/metrics" } -rivet-util = { path = "../../../../lib/util/core" } -types = { path = "../../../../lib/types/core" } -uuid = { version = "1", features = ["v4", "serde"] } - -[build-dependencies] -merkle_hash = "3.6" -hex = "0.4" -tokio = { version = "1.29", features = 
["full"] } diff --git a/svc/pkg/cluster/worker/Cargo.toml b/svc/pkg/cluster/worker/Cargo.toml index 7fefd6abd..0ed9bcc1a 100644 --- a/svc/pkg/cluster/worker/Cargo.toml +++ b/svc/pkg/cluster/worker/Cargo.toml @@ -33,9 +33,7 @@ util-cluster = { package = "rivet-util-cluster", path = "../util" } cluster-datacenter-get = { path = "../ops/datacenter-get" } cluster-datacenter-list = { path = "../ops/datacenter-list" } cluster-datacenter-topology-get = { path = "../ops/datacenter-topology-get" } -linode-instance-type-get = { path = "../../linode/ops/instance-type-get" } -linode-server-destroy = { path = "../../linode/ops/server-destroy" } -linode-server-provision = { path = "../../linode/ops/server-provision" } +linode = { path = "../../linode" } token-create = { path = "../../token/ops/create" } [dependencies.nomad_client] diff --git a/svc/pkg/cluster/worker/src/lib.rs b/svc/pkg/cluster/worker/src/lib.rs deleted file mode 100644 index beb1874f4..000000000 --- a/svc/pkg/cluster/worker/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod util; -pub mod workers; diff --git a/svc/pkg/cluster/worker/src/util.rs b/svc/pkg/cluster/worker/src/util.rs deleted file mode 100644 index 9cc49fea5..000000000 --- a/svc/pkg/cluster/worker/src/util.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[derive(thiserror::Error, Debug)] -#[error("cloudflare: {source}")] -pub struct CloudflareError { - #[from] - source: anyhow::Error, -} diff --git a/svc/pkg/cluster/worker/src/workers/create.rs b/svc/pkg/cluster/worker/src/workers/create.rs deleted file mode 100644 index 8f6aee608..000000000 --- a/svc/pkg/cluster/worker/src/workers/create.rs +++ /dev/null @@ -1,33 +0,0 @@ -use chirp_worker::prelude::*; -use proto::backend::pkg::*; - -#[worker(name = "cluster-create")] -async fn worker(ctx: &OperationContext) -> GlobalResult<()> { - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - let owner_team_id = ctx.owner_team_id.map(|id| id.as_uuid()); - - sql_execute!( - [ctx] - " - INSERT INTO db_cluster.clusters ( - cluster_id, - name_id, - owner_team_id, - create_ts - ) - VALUES ($1, $2, $3, $4) - ", - cluster_id, - &ctx.name_id, - owner_team_id, - util::timestamp::now(), - ) - .await?; - - msg!([ctx] cluster::msg::create_complete(cluster_id) { - cluster_id: ctx.cluster_id - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_create.rs b/svc/pkg/cluster/worker/src/workers/datacenter_create.rs deleted file mode 100644 index d04018c80..000000000 --- a/svc/pkg/cluster/worker/src/workers/datacenter_create.rs +++ /dev/null @@ -1,98 +0,0 @@ -use chirp_worker::prelude::*; -use futures_util::FutureExt; -use proto::backend::{self, pkg::*}; - -#[worker(name = "cluster-datacenter-create")] -async fn worker( - ctx: &OperationContext, -) -> GlobalResult<()> { - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); - - let mut pools = ctx.pools.clone(); - - // Constrain the desired count - for pool in &mut pools { - pool.desired_count = pool.desired_count.max(pool.min_count).min(pool.max_count); - } - - // Copy pools config to write to db - let pools = cluster::msg::datacenter_create::Pools { pools }; - - let mut pools_buf = Vec::with_capacity(pools.encoded_len()); - pools.encode(&mut pools_buf)?; - - rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { - let ctx = ctx.clone(); - let pools_buf = pools_buf.clone(); - - async move { - sql_execute!( - [ctx, @tx tx] - " - INSERT INTO db_cluster.datacenters ( - datacenter_id, - cluster_id, - name_id, - 
display_name, - provider, - provider_datacenter_id, - provider_api_token, - pools, - build_delivery_method, - prebakes_enabled, - create_ts - ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) - ", - datacenter_id, - cluster_id, - &ctx.name_id, - &ctx.display_name, - ctx.provider as i64, - &ctx.provider_datacenter_id, - &ctx.provider_api_token, - pools_buf, - ctx.build_delivery_method as i64, - ctx.prebakes_enabled, - util::timestamp::now(), - ) - .await?; - - // Insert TLS record - sql_execute!( - [ctx, @tx tx] - " - INSERT INTO db_cluster.datacenter_tls ( - datacenter_id, - state, - expire_ts - ) - VALUES ($1, $2, 0) - ", - datacenter_id, - backend::cluster::TlsState::Creating as i64, - ) - .await?; - - Ok(()) - } - .boxed() - }) - .await?; - - // Start TLS issuing process - msg!([ctx] cluster::msg::datacenter_tls_issue(datacenter_id) { - datacenter_id: ctx.datacenter_id, - renew: false, - }) - .await?; - - // Scale servers - msg!([ctx] cluster::msg::datacenter_scale(datacenter_id) { - datacenter_id: ctx.datacenter_id, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_update.rs b/svc/pkg/cluster/worker/src/workers/datacenter_update.rs deleted file mode 100644 index e29663bd3..000000000 --- a/svc/pkg/cluster/worker/src/workers/datacenter_update.rs +++ /dev/null @@ -1,81 +0,0 @@ -use chirp_worker::prelude::*; -use proto::backend::pkg::*; - -#[worker(name = "cluster-datacenter-update")] -async fn worker( - ctx: &OperationContext, -) -> GlobalResult<()> { - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); - - let datacenter_res = op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id.into()], - }) - .await?; - let datacenter = unwrap!( - datacenter_res.datacenters.first(), - "datacenter does not exist" - ); - - // Update pools config - let mut new_pools = cluster::msg::datacenter_create::Pools { - pools: datacenter.pools.clone(), - }; - for pool in &ctx.pools { - let current_pool = unwrap!( - new_pools - .pools - .iter_mut() - .find(|p| p.pool_type == pool.pool_type), - "attempting to update pool that doesn't exist in current config" - ); - - // Update pool config - if !pool.hardware.is_empty() { - current_pool.hardware.clone_from(&pool.hardware); - } - if let Some(desired_count) = pool.desired_count { - current_pool.desired_count = desired_count; - } - if let Some(min_count) = pool.min_count { - current_pool.min_count = min_count; - } - if let Some(max_count) = pool.max_count { - current_pool.max_count = max_count; - } - if let Some(drain_timeout) = pool.drain_timeout { - current_pool.drain_timeout = drain_timeout; - } - } - - // Encode config - let mut pools_buf = Vec::with_capacity(new_pools.encoded_len()); - new_pools.encode(&mut pools_buf)?; - - // Update pools - sql_execute!( - [ctx] - " - UPDATE db_cluster.datacenters - SET - pools = $2, - prebakes_enabled = coalesce($3, prebakes_enabled) - WHERE datacenter_id = $1 - ", - datacenter_id, - pools_buf, - ctx.prebakes_enabled, - ) - .await?; - - // Purge cache - ctx.cache() - .purge("cluster.datacenters", [datacenter_id]) - .await?; - - msg!([ctx] cluster::msg::datacenter_scale(datacenter_id) { - datacenter_id: ctx.datacenter_id, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/cluster/worker/src/workers/game_link.rs b/svc/pkg/cluster/worker/src/workers/game_link.rs deleted file mode 100644 index 64f241cbb..000000000 --- a/svc/pkg/cluster/worker/src/workers/game_link.rs +++ /dev/null @@ -1,30 +0,0 @@ -use chirp_worker::prelude::*; -use proto::backend::pkg::*; - 
-#[worker(name = "cluster-game-link")] -async fn worker(ctx: &OperationContext) -> GlobalResult<()> { - let game_id = unwrap_ref!(ctx.game_id).as_uuid(); - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - - sql_execute!( - [ctx] - " - INSERT INTO db_cluster.games ( - game_id, - cluster_id - ) - VALUES ($1, $2) - ", - game_id, - cluster_id, - ) - .await?; - - msg!([ctx] cluster::msg::game_link_complete(game_id, cluster_id) { - game_id: ctx.game_id, - cluster_id: ctx.cluster_id, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/cluster/worker/src/workers/mod.rs b/svc/pkg/cluster/worker/src/workers/mod.rs deleted file mode 100644 index a943ba64d..000000000 --- a/svc/pkg/cluster/worker/src/workers/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -pub mod create; -pub mod datacenter_create; -pub mod datacenter_scale; -pub mod datacenter_tls_issue; -pub mod datacenter_update; -pub mod game_link; -pub mod nomad_node_drain_complete; -pub mod nomad_node_registered; -pub mod server_destroy; -pub mod server_dns_create; -pub mod server_dns_delete; -pub mod server_drain; -pub mod server_install; -pub mod server_install_complete; -pub mod server_provision; -pub mod server_taint; -pub mod server_undrain; - -chirp_worker::workers![ - server_taint, - create, - datacenter_create, - datacenter_scale, - datacenter_tls_issue, - datacenter_update, - game_link, - nomad_node_drain_complete, - nomad_node_registered, - server_destroy, - server_dns_create, - server_dns_delete, - server_drain, - server_install_complete, - server_install, - server_provision, - server_undrain, -]; diff --git a/svc/pkg/cluster/worker/src/workers/server_provision.rs b/svc/pkg/cluster/worker/src/workers/server_provision.rs deleted file mode 100644 index 2376d670d..000000000 --- a/svc/pkg/cluster/worker/src/workers/server_provision.rs +++ /dev/null @@ -1,314 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr}; - -use chirp_worker::prelude::*; -use futures_util::FutureExt; -use proto::backend::{self, cluster::PoolType, pkg::*}; -use rand::Rng; -use util_cluster::metrics; - -struct ProvisionResponse { - provider_server_id: String, - provider_hardware: String, - public_ip: String, - already_installed: bool, -} - -// More than the timeout in linode-server-provision -#[worker(name = "cluster-server-provision", timeout = 300)] -async fn worker( - ctx: &OperationContext, -) -> GlobalResult<()> { - // TODO: RVTEE-75 - rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| inner(ctx.clone(), tx).boxed()).await?; - - Ok(()) -} - -async fn inner( - ctx: OperationContext, - tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, -) -> GlobalResult<()> { - let datacenter_id = unwrap!(ctx.datacenter_id).as_uuid(); - let server_id = unwrap_ref!(ctx.server_id).as_uuid(); - let pool_type = unwrap!(backend::cluster::PoolType::from_i32(ctx.pool_type)); - let provider = unwrap!(backend::cluster::Provider::from_i32(ctx.provider)); - - // Check if server is already provisioned - // NOTE: sql record already exists before this worker is called - let (provider_server_id,) = sql_fetch_one!( - [ctx, (Option,)] - " - SELECT - provider_server_id - FROM db_cluster.servers - WHERE server_id = $1 - ", - server_id, - ) - .await?; - if let Some(provider_server_id) = provider_server_id { - tracing::warn!( - ?server_id, - ?provider_server_id, - "server is already provisioned" - ); - return Ok(()); - } - - // Fetch datacenter config - let datacenter_res = op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id.into()], - }) - .await?; - let datacenter = 
unwrap!(datacenter_res.datacenters.first()); - let pool = unwrap!( - datacenter - .pools - .iter() - .find(|p| p.pool_type == ctx.pool_type), - "datacenter does not have this type of pool configured" - ); - - // Get a new vlan ip - let vlan_ip = get_vlan_ip(&ctx, tx, datacenter_id, server_id, pool_type).await?; - - sql_execute!( - [ctx] - " - UPDATE db_cluster.servers - SET vlan_ip = $2 - WHERE server_id = $1 - ", - server_id, - IpAddr::V4(vlan_ip), - ) - .await?; - - // Iterate through list of hardware and attempt to schedule a server. Goes to the next - // hardware if an error happens during provisioning - let mut hardware_list = pool.hardware.iter(); - let provision_res = loop { - // List exhausted - let Some(hardware) = hardware_list.next() else { - break None; - }; - - tracing::info!( - "attempting to provision hardware: {}", - hardware.provider_hardware - ); - - match provider { - backend::cluster::Provider::Linode => { - let res = op!([ctx] linode_server_provision { - datacenter_id: ctx.datacenter_id, - server_id: ctx.server_id, - provider_datacenter_id: datacenter.provider_datacenter_id.clone(), - hardware: Some(hardware.clone()), - pool_type: ctx.pool_type, - vlan_ip: vlan_ip.to_string(), - tags: ctx.tags.clone(), - use_prebakes: datacenter.prebakes_enabled, - }) - .await; - - match res { - Ok(res) => { - break Some(ProvisionResponse { - provider_server_id: res.provider_server_id.clone(), - provider_hardware: hardware.provider_hardware.clone(), - public_ip: res.public_ip.clone(), - already_installed: res.already_installed, - }) - } - Err(err) => { - tracing::error!( - ?err, - ?server_id, - "failed to provision server, cleaning up" - ); - - cleanup(&ctx, server_id).await?; - } - } - } - } - }; - - if let Some(provision_res) = provision_res { - let provision_complete_ts = util::timestamp::now(); - - let (create_ts,) = sql_fetch_one!( - [ctx, (i64,)] - " - UPDATE db_cluster.servers - SET - provider_server_id = $2, - provider_hardware = $3, - public_ip = $4, - provision_complete_ts = $5, - install_complete_ts = $6 - WHERE server_id = $1 - RETURNING create_ts - ", - server_id, - &provision_res.provider_server_id, - &provision_res.provider_hardware, - &provision_res.public_ip, - provision_complete_ts, - if provision_res.already_installed { - Some(provision_complete_ts) - } else { - None - }, - ) - .await?; - - if provision_res.already_installed { - // Create DNS record because the server is already installed - if let backend::cluster::PoolType::Gg = pool_type { - // Source of truth record - sql_execute!( - [ctx] - " - INSERT INTO db_cluster.servers_cloudflare (server_id) - VALUES ($1) - ", - server_id, - ) - .await?; - - msg!([ctx] cluster::msg::server_dns_create(server_id) { - server_id: ctx.server_id, - }) - .await?; - } - } - // Install components on server - else { - let request_id = Uuid::new_v4(); - - msg!([ctx] cluster::msg::server_install(request_id) { - request_id: Some(request_id.into()), - public_ip: provision_res.public_ip, - datacenter_id: ctx.datacenter_id, - server_id: ctx.server_id, - pool_type: ctx.pool_type, - provider: ctx.provider, - initialize_immediately: true, - }) - .await?; - } - - insert_metrics(datacenter, &pool_type, provision_complete_ts, create_ts).await?; - } else { - tracing::error!(?server_id, hardware_options=?pool.hardware.len(), "failed all attempts to provision server"); - bail!("failed all attempts to provision server"); - } - - Ok(()) -} - -async fn get_vlan_ip( - ctx: &OperationContext, - _tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, - 
datacenter_id: Uuid, - server_id: Uuid, - pool_type: backend::cluster::PoolType, -) -> GlobalResult { - // Find next available vlan index - let mut vlan_addr_range = match pool_type { - PoolType::Job => util::net::job::vlan_addr_range(), - PoolType::Gg => util::net::gg::vlan_addr_range(), - PoolType::Ats => util::net::ats::vlan_addr_range(), - }; - let max_idx = vlan_addr_range.count() as i64; - let (network_idx,) = sql_fetch_one!( - [ctx, (i64,)] - " - WITH - get_next_network_idx AS ( - SELECT mod(idx + $1, $2) AS idx - FROM generate_series(0, $2) AS s(idx) - WHERE NOT EXISTS ( - SELECT 1 - FROM db_cluster.servers - WHERE - pool_type = $3 AND - -- Technically this should check all servers where their datacenter's provider and - -- provider_datacenter_id are the same because VLAN is separated by irl datacenter - -- but this is good enough - datacenter_id = $4 AND - network_idx = mod(idx + $1, $2) AND - cloud_destroy_ts IS NULL - ) - LIMIT 1 - ), - update_network_idx AS ( - UPDATE db_cluster.servers - SET network_idx = (SELECT idx FROM get_next_network_idx) - WHERE server_id = $5 - RETURNING 1 - ) - SELECT idx FROM get_next_network_idx - ", - // Choose a random index to start from for better index spread - rand::thread_rng().gen_range(0i64..max_idx), - max_idx, - pool_type as i64, - datacenter_id, - server_id, - ) - .await?; - - let vlan_ip = unwrap!(vlan_addr_range.nth(network_idx as usize)); - - Ok(vlan_ip) -} - -// This function is used to destroy leftovers from a failed partial provision. -async fn cleanup( - ctx: &OperationContext, - server_id: Uuid, -) -> GlobalResult<()> { - // NOTE: Usually before publishing this message we would set `cloud_destroy_ts`. We do not set it here - // because this message will be retried with the same server id - - // Wait for server to complete destroying so we don't get a primary key conflict (the same server id - // will be used to try and provision the next hardware option) - msg!([ctx] cluster::msg::server_destroy(server_id) -> cluster::msg::server_destroy_complete { - server_id: Some(server_id.into()), - // We force destroy because the provision process failed - force: true, - }) - .await?; - - Ok(()) -} - -async fn insert_metrics( - dc: &backend::cluster::Datacenter, - pool_type: &backend::cluster::PoolType, - provision_complete_ts: i64, - create_ts: i64, -) -> GlobalResult<()> { - let datacenter_id = unwrap_ref!(dc.datacenter_id).as_uuid().to_string(); - let cluster_id = unwrap_ref!(dc.cluster_id).as_uuid().to_string(); - let dt = (provision_complete_ts - create_ts) as f64 / 1000.0; - - metrics::PROVISION_DURATION - .with_label_values(&[ - cluster_id.as_str(), - datacenter_id.as_str(), - &dc.provider_datacenter_id, - &dc.name_id, - match pool_type { - backend::cluster::PoolType::Job => "job", - backend::cluster::PoolType::Gg => "gg", - backend::cluster::PoolType::Ats => "ats", - }, - ]) - .observe(dt); - - Ok(()) -} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml b/svc/pkg/linode/Cargo.toml similarity index 51% rename from svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml rename to svc/pkg/linode/Cargo.toml index 0ff168c36..d1f1d82e7 100644 --- a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml +++ b/svc/pkg/linode/Cargo.toml @@ -1,14 +1,18 @@ [package] -name = "cluster-datacenter-resolve-for-name-id" +name = "linode" version = "0.0.1" edition = "2018" authors = ["Rivet Gaming, LLC "] license = "Apache-2.0" [dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = 
"0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } +chirp-workflow = { path = "../../../lib/chirp-workflow/core" } +chrono = "0.4" +rand = "0.8" +reqwest = { version = "0.11", features = ["json"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +ssh-key = "0.6.3" [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" @@ -16,4 +20,4 @@ rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" default-features = false [dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } +cluster = { path = "../cluster" } diff --git a/svc/pkg/linode/ops/server-provision/Service.toml b/svc/pkg/linode/Service.toml similarity index 69% rename from svc/pkg/linode/ops/server-provision/Service.toml rename to svc/pkg/linode/Service.toml index 40485ec54..86f8b71ab 100644 --- a/svc/pkg/linode/ops/server-provision/Service.toml +++ b/svc/pkg/linode/Service.toml @@ -1,11 +1,14 @@ [service] -name = "linode-server-provision" +name = "linode" [runtime] kind = "rust" -[operation] +[package] [secrets] "linode/token" = { optional = true } "ssh/server/private_key_openssh" = {} + +[databases] +db-linode = {} diff --git a/svc/pkg/linode/db/linode/Service.toml b/svc/pkg/linode/db/linode/Service.toml new file mode 100644 index 000000000..4c08d1950 --- /dev/null +++ b/svc/pkg/linode/db/linode/Service.toml @@ -0,0 +1,7 @@ +[service] +name = "db-linode" + +[runtime] +kind = "crdb" + +[database] diff --git a/svc/pkg/linode/db/linode/migrations/20240705194302_init.down.sql b/svc/pkg/linode/db/linode/migrations/20240705194302_init.down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql b/svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql new file mode 100644 index 000000000..888ecbf43 --- /dev/null +++ b/svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql @@ -0,0 +1,28 @@ +CREATE TABLE server_images ( + id UUID NOT NULL, + + create_ts INT NOT NULL, + destroy_ts INT, + + ssh_key_id INT NOT NULL, + linode_id INT, + firewall_id INT, + disk_id INT, + public_ip INET, + image_id TEXT, + + PRIMARY KEY (install_hash, datacenter_id, firewall_preset) +); + +-- Effectively a conditional primary key +CREATE UNIQUE INDEX idx_server_images_pkey +ON server_images (id) +WHERE destroy_ts IS NULL; + +CREATE INDEX idx_server_images_public_ip +ON server_images (public_ip) +WHERE destroy_ts IS NULL; + +CREATE INDEX idx_server_images_image_id +ON server_images (image_id) +WHERE destroy_ts IS NULL; diff --git a/svc/pkg/linode/ops/instance-type-get/Cargo.toml b/svc/pkg/linode/ops/instance-type-get/Cargo.toml deleted file mode 100644 index fb8fa5f83..000000000 --- a/svc/pkg/linode/ops/instance-type-get/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "linode-instance-type-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -util-linode = { package = "rivet-util-linode", path = "../../util" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } diff --git a/svc/pkg/linode/ops/instance-type-get/Service.toml 
b/svc/pkg/linode/ops/instance-type-get/Service.toml deleted file mode 100644 index 1d9736733..000000000 --- a/svc/pkg/linode/ops/instance-type-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "linode-instance-type-get" - -[runtime] -kind = "rust" - -[operation] - -[secrets] -"linode/token" = { optional = true } diff --git a/svc/pkg/linode/ops/instance-type-get/src/lib.rs b/svc/pkg/linode/ops/instance-type-get/src/lib.rs deleted file mode 100644 index 4a83d386e..000000000 --- a/svc/pkg/linode/ops/instance-type-get/src/lib.rs +++ /dev/null @@ -1,43 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; -use util_linode::api; - -#[operation(name = "linode-instance-type-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - // Build HTTP client - let client = util_linode::Client::new(None).await?; - - // Get hardware stats from linode and cache - let instance_types_res = ctx - .cache() - .ttl(util::duration::days(1)) - .fetch_one_proto("instance_types", "linode", { - let client = client.clone(); - move |mut cache, key| { - let client = client.clone(); - async move { - let api_res = api::list_instance_types(&client).await?; - - cache.resolve( - &key, - linode::instance_type_get::CacheInstanceTypes { - instance_types: api_res.into_iter().map(Into::into).collect::>(), - }, - ); - - Ok(cache) - } - } - }) - .await?; - - let instance_types = unwrap!(instance_types_res) - .instance_types - .into_iter() - .filter(|ty| ctx.hardware_ids.iter().any(|h| h == &ty.hardware_id)) - .collect::>(); - - Ok(linode::instance_type_get::Response { instance_types }) -} diff --git a/svc/pkg/linode/ops/server-destroy/Cargo.toml b/svc/pkg/linode/ops/server-destroy/Cargo.toml deleted file mode 100644 index 23c155ed3..000000000 --- a/svc/pkg/linode/ops/server-destroy/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "linode-server-destroy" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -reqwest = { version = "0.11", features = ["json"] } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } -util-linode = { package = "rivet-util-linode", path = "../../util" } - -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } - -linode-server-provision = { path = "../server-provision" } diff --git a/svc/pkg/linode/ops/server-destroy/Service.toml b/svc/pkg/linode/ops/server-destroy/Service.toml deleted file mode 100644 index be0e245fc..000000000 --- a/svc/pkg/linode/ops/server-destroy/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "linode-server-destroy" - -[runtime] -kind = "rust" - -[operation] - -[secrets] -"linode/token" = { optional = true } diff --git a/svc/pkg/linode/ops/server-destroy/src/lib.rs b/svc/pkg/linode/ops/server-destroy/src/lib.rs deleted file mode 100644 index e90978052..000000000 --- a/svc/pkg/linode/ops/server-destroy/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; -use util_linode::api; - -#[derive(sqlx::FromRow)] -struct LinodeData { - ssh_key_id: i64, - linode_id: Option, - firewall_id: Option, -} - 
-#[operation(name = "linode-server-destroy")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let server_id = unwrap_ref!(ctx.server_id).as_uuid(); - let datacenter_id = unwrap!(ctx.datacenter_id); - - let datacenter_res = op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id], - }) - .await?; - let datacenter = unwrap!(datacenter_res.datacenters.first()); - - let data = sql_fetch_optional!( - [ctx, LinodeData] - " - SELECT ssh_key_id, linode_id, firewall_id - FROM db_cluster.servers_linode - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - ) - .await?; - - let Some(data) = data else { - tracing::warn!("deleting server that doesn't exist"); - return Ok(linode::server_destroy::Response {}); - }; - - // Build HTTP client - let client = util_linode::Client::new(datacenter.provider_api_token.clone()).await?; - - if let Some(linode_id) = data.linode_id { - api::delete_instance(&client, linode_id).await?; - } - - api::delete_ssh_key(&client, data.ssh_key_id).await?; - - if let Some(firewall_id) = data.firewall_id { - api::delete_firewall(&client, firewall_id).await?; - } - - // Remove record - sql_execute!( - [ctx] - " - UPDATE db_cluster.servers_linode - SET destroy_ts = $2 - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - util::timestamp::now(), - ) - .await?; - - Ok(linode::server_destroy::Response {}) -} diff --git a/svc/pkg/linode/ops/server-provision/Cargo.toml b/svc/pkg/linode/ops/server-provision/Cargo.toml deleted file mode 100644 index a7e25cafb..000000000 --- a/svc/pkg/linode/ops/server-provision/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "linode-server-provision" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -reqwest = { version = "0.11", features = ["json"] } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } -util-linode = { package = "rivet-util-linode", path = "../../util" } - -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } - -linode-server-destroy = { path = "../server-destroy" } diff --git a/svc/pkg/linode/ops/server-provision/README.md b/svc/pkg/linode/ops/server-provision/README.md deleted file mode 100644 index 7b0f7f2d1..000000000 --- a/svc/pkg/linode/ops/server-provision/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# linode-server-provision - -This was meant to be agnostic to all other packages and simply create a server on Linode, but because of -custom API keys and prebake images we need to include a `datacenter_id` in the request. In the future and if -needed this can be made optional so that this endpoint does not require a `datacenter_id`. 
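For illustration, here is a rough sketch (not part of this commit) of how a caller might drive the `linode_server` workflow that replaces this operation later in the patch (`svc/pkg/linode/src/workflows/server.rs`). The `Input` fields and the `ProvisionComplete`/`Destroy` signals are taken from that workflow definition; the `dispatch_tagged_workflow` helper name and the example field values are assumptions about the chirp-workflow API rather than code from this change.

    // Hypothetical caller, e.g. a cluster-side workflow. Only the Input struct
    // and the signal types below are defined in this patch; the dispatch helper
    // name is an assumption about the chirp-workflow API.
    use std::net::Ipv4Addr;

    use chirp_workflow::prelude::*;
    use serde_json::json;

    use linode::types::FirewallPreset;
    use linode::workflows::server::{Destroy, Input as LinodeServerInput, ProvisionComplete};

    async fn provision_server(ctx: &mut WorkflowCtx, server_id: Uuid) -> GlobalResult<()> {
        // Kick off the Linode-specific provisioning workflow.
        ctx.dispatch_tagged_workflow(
            &json!({ "server_id": server_id }),
            LinodeServerInput {
                server_id,
                provider_datacenter_id: "us-southeast".to_string(), // example value
                custom_image: None,
                hardware: "g6-nanode-1".to_string(), // example value
                api_token: None,
                firewall_preset: FirewallPreset::Job,
                vlan_ip: Ipv4Addr::new(10, 0, 0, 2), // example value
                tags: Vec::new(),
            },
        )
        .await?;

        // The workflow reports back with a tagged signal once the instance is booted.
        let provisioned = ctx.listen::<ProvisionComplete>().await?;
        tracing::info!(public_ip = ?provisioned.public_ip, "linode server provisioned");

        // Tearing the server down later is a matter of sending the Destroy signal.
        ctx.tagged_signal(&json!({ "server_id": server_id }), Destroy {}).await?;

        Ok(())
    }

The practical upshot is that each Linode API call becomes its own activity, so a retried workflow can pick up after the last completed call instead of re-running the old 266-line provision routine from scratch.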
diff --git a/svc/pkg/linode/ops/server-provision/src/lib.rs b/svc/pkg/linode/ops/server-provision/src/lib.rs deleted file mode 100644 index d08dd1c2f..000000000 --- a/svc/pkg/linode/ops/server-provision/src/lib.rs +++ /dev/null @@ -1,266 +0,0 @@ -use proto::backend::{self, cluster::PoolType, pkg::*}; -use rivet_operation::prelude::*; -use util_linode::api; - -// Less than the timeout in cluster-server-provision -#[operation(name = "linode-server-provision", timeout = 245)] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let crdb = ctx.crdb().await?; - let server_id = unwrap_ref!(ctx.server_id).as_uuid(); - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); - let provider_datacenter_id = ctx.provider_datacenter_id.clone(); - let pool_type = unwrap!(PoolType::from_i32(ctx.pool_type)); - let provider_hardware = unwrap_ref!(ctx.hardware).provider_hardware.clone(); - - let datacenter_res = op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id.into()], - }) - .await?; - let datacenter = unwrap!(datacenter_res.datacenters.first()); - - let ns = util::env::namespace(); - let pool_type_str = match pool_type { - PoolType::Job => "job", - PoolType::Gg => "gg", - PoolType::Ats => "ats", - }; - // Linode label must be 3-64 characters, UUID's are 36 - let name = format!("{ns}-{server_id}"); - - let tags = ctx - .tags - .iter() - .cloned() - .chain([ - // HACK: Linode requires tags to be > 3 characters. We extend the namespace to make sure it - // meets the minimum length requirement. - format!("rivet-{ns}"), - format!("{ns}-{provider_datacenter_id}"), - format!("{ns}-{pool_type_str}"), - format!("{ns}-{provider_datacenter_id}-{pool_type_str}"), - ]) - .collect::>(); - - let firewall_inbound = match pool_type { - PoolType::Job => util::net::job::firewall(), - PoolType::Gg => util::net::gg::firewall(), - PoolType::Ats => util::net::ats::firewall(), - }; - - // Build context - let server = api::ProvisionCtx { - datacenter: provider_datacenter_id, - name, - hardware: provider_hardware, - vlan_ip: Some(ctx.vlan_ip.clone()), - tags, - firewall_inbound, - }; - - // Build HTTP client - let client = util_linode::Client::new(datacenter.provider_api_token.clone()).await?; - - // Create SSH key - let ssh_key_label = format!("{ns}-{server_id}"); - let ssh_key_res = api::create_ssh_key( - &client, - &ssh_key_label, - ctx.tags.iter().any(|tag| tag == "test"), - ) - .await?; - - // Write SSH key id - sql_execute!( - [ctx, &crdb] - " - INSERT INTO db_cluster.servers_linode ( - server_id, - ssh_key_id - ) - VALUES ($1, $2) - ", - server_id, - ssh_key_res.id as i64, - ) - .await?; - - let create_instance_res = - api::create_instance(&client, &server, &ssh_key_res.public_key).await?; - let linode_id = create_instance_res.id; - - // Write linode id - sql_execute!( - [ctx, &crdb] - " - UPDATE db_cluster.servers_linode - SET linode_id = $2 - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - linode_id as i64, - ) - .await?; - - api::wait_instance_ready(&client, linode_id).await?; - - let (create_disks_res, used_custom_image) = create_disks( - &ctx, - &crdb, - &client, - CreateDisks { - provider_datacenter_id: &server.datacenter, - datacenter_id, - pool_type, - ssh_key: &ssh_key_res.public_key, - linode_id, - server_disk_size: create_instance_res.specs.disk, - }, - ) - .await?; - - api::create_instance_config(&client, &server, linode_id, &create_disks_res).await?; - - let firewall_res = api::create_firewall(&client, &server, linode_id).await?; - - // Write 
firewall id - sql_execute!( - [ctx, &crdb] - " - UPDATE db_cluster.servers_linode - SET firewall_id = $2 - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - firewall_res.id as i64, - ) - .await?; - - api::boot_instance(&client, linode_id).await?; - - let public_ip = api::get_public_ip(&client, linode_id).await?; - - Ok(linode::server_provision::Response { - provider_server_id: linode_id.to_string(), - public_ip: public_ip.to_string(), - already_installed: used_custom_image, - }) -} - -struct CreateDisks<'a> { - provider_datacenter_id: &'a str, - datacenter_id: Uuid, - pool_type: PoolType, - ssh_key: &'a str, - linode_id: u64, - server_disk_size: u64, -} - -async fn create_disks( - ctx: &OperationContext, - crdb: &CrdbPool, - client: &util_linode::Client, - opts: CreateDisks<'_>, -) -> GlobalResult<(api::CreateDisksResponse, bool)> { - // Try to get custom image (if exists) - let (custom_image, updated) = if ctx.use_prebakes { - get_custom_image(ctx, crdb, opts.datacenter_id, opts.pool_type).await? - } else { - (None, false) - }; - - // Default image - let used_custom_image = custom_image.is_some(); - let image = if let Some(custom_image) = custom_image { - tracing::info!("using custom image {}", custom_image); - - custom_image - } else { - tracing::info!("custom image not ready yet, continuing normally"); - - "linode/debian11".to_string() - }; - - // Start custom image creation process - if updated { - msg!([ctx] linode::msg::prebake_provision(opts.datacenter_id, opts.pool_type as i32) { - datacenter_id: ctx.datacenter_id, - pool_type: opts.pool_type as i32, - provider_datacenter_id: opts.provider_datacenter_id.to_string(), - tags: Vec::new(), - }) - .await?; - } - - let create_disks_res = api::create_disks( - client, - opts.ssh_key, - opts.linode_id, - &image, - opts.server_disk_size, - ) - .await?; - - Ok((create_disks_res, used_custom_image)) -} - -async fn get_custom_image( - ctx: &OperationContext, - crdb: &CrdbPool, - datacenter_id: Uuid, - pool_type: PoolType, -) -> GlobalResult<(Option, bool)> { - let provider = backend::cluster::Provider::Linode; - - // Get the custom image id for this server, or insert a record and start creating one - let (image_id, updated) = sql_fetch_one!( - [ctx, (Option, bool), &crdb] - " - WITH - updated AS ( - INSERT INTO db_cluster.server_images AS s ( - provider, install_hash, datacenter_id, pool_type, create_ts - ) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (provider, install_hash, datacenter_id, pool_type) DO UPDATE - SET - provider_image_id = NULL, - create_ts = $5 - WHERE s.create_ts < $6 - RETURNING provider, install_hash, datacenter_id, pool_type - ), - selected AS ( - SELECT provider, install_hash, datacenter_id, pool_type, provider_image_id - FROM db_cluster.server_images - WHERE - provider = $1 AND - install_hash = $2 AND - datacenter_id = $3 AND - pool_type = $4 - ) - SELECT - selected.provider_image_id, - -- Primary key is not null - (updated.provider IS NOT NULL) AS updated - FROM selected - FULL OUTER JOIN updated - ON true - ", - provider as i64, - util_cluster::INSTALL_SCRIPT_HASH, - datacenter_id, - pool_type as i64, - util::timestamp::now(), - // 5 month expiration - util::timestamp::now() - util::duration::days(5 * 30), - ) - .await?; - - // Updated is true if this specific sql call either reset (if expired) or inserted the row - Ok((if updated { None } else { image_id }, updated)) -} diff --git a/svc/pkg/linode/src/lib.rs b/svc/pkg/linode/src/lib.rs new file mode 100644 index 000000000..1ebf758e3 --- /dev/null +++ 
b/svc/pkg/linode/src/lib.rs @@ -0,0 +1,15 @@ +use chirp_workflow::prelude::*; + +pub mod ops; +pub mod types; +pub mod util; +pub mod workflows; + +pub fn registry() -> Registry { + use workflows::*; + + let mut registry = Registry::new(); + registry.register_workflow::(); + + registry +} diff --git a/svc/pkg/linode/src/ops/instance_type_get.rs b/svc/pkg/linode/src/ops/instance_type_get.rs new file mode 100644 index 000000000..69eeed103 --- /dev/null +++ b/svc/pkg/linode/src/ops/instance_type_get.rs @@ -0,0 +1,53 @@ +use chirp_workflow::prelude::*; + +use crate::{ + types::InstanceType, + util::{api, client}, +}; + +pub struct Input { + pub hardware_ids: Vec, +} + +pub struct Output { + pub instance_types: Vec, +} + +#[operation] +pub async fn linode_instance_type_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + // Build HTTP client + let client = client::Client::new(None).await?; + + // Get hardware stats from linode and cache + let instance_types_res = ctx + .cache() + .ttl(util::duration::days(1)) + .fetch_one_json("instance_types", "linode", { + let client = client.clone(); + move |mut cache, key| { + let client = client.clone(); + async move { + let api_res = api::list_instance_types(&client).await?; + + cache.resolve( + &key, + api_res + .into_iter() + .map(Into::::into) + .collect::>(), + ); + + Ok(cache) + } + } + }) + .await?; + + // Filter by hardware + let instance_types = unwrap!(instance_types_res) + .into_iter() + .filter(|ty| input.hardware_ids.iter().any(|h| h == &ty.hardware_id)) + .collect::>(); + + Ok(Output { instance_types }) +} diff --git a/svc/pkg/linode/src/ops/mod.rs b/svc/pkg/linode/src/ops/mod.rs new file mode 100644 index 000000000..ff2e5a372 --- /dev/null +++ b/svc/pkg/linode/src/ops/mod.rs @@ -0,0 +1 @@ +pub mod instance_type_get; diff --git a/svc/pkg/linode/src/types.rs b/svc/pkg/linode/src/types.rs new file mode 100644 index 000000000..12f57d922 --- /dev/null +++ b/svc/pkg/linode/src/types.rs @@ -0,0 +1,38 @@ +use chirp_workflow::prelude::*; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct InstanceType { + pub hardware_id: String, + pub memory: u64, + pub disk: u64, + pub vcpus: u64, + pub transfer: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub enum FirewallPreset { + Job, + Gg, + Ats, +} + +impl FirewallPreset { + pub fn rules(&self) -> Vec { + match self { + FirewallPreset::Job => util::net::job::firewall(), + FirewallPreset::Gg => util::net::gg::firewall(), + FirewallPreset::Ats => util::net::ats::firewall(), + } + } +} + +impl std::fmt::Display for FirewallPreset { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + FirewallPreset::Job => write!(f, "job"), + FirewallPreset::Gg => write!(f, "gg"), + FirewallPreset::Ats => write!(f, "ats"), + } + } +} diff --git a/svc/pkg/linode/util/src/api.rs b/svc/pkg/linode/src/util/api.rs similarity index 89% rename from svc/pkg/linode/util/src/api.rs rename to svc/pkg/linode/src/util/api.rs index 6f7f59754..857327595 100644 --- a/svc/pkg/linode/util/src/api.rs +++ b/svc/pkg/linode/src/util/api.rs @@ -1,22 +1,18 @@ use std::{net::Ipv4Addr, str, time::Duration}; +use chirp_workflow::prelude::*; use chrono::{DateTime, Utc}; -use proto::backend::pkg::*; -use rivet_operation::prelude::*; use serde::{Deserialize, Deserializer}; use serde_json::json; use ssh_key::PrivateKey; -use crate::{generate_password, ApiErrorResponse, Client}; - -pub struct ProvisionCtx { - pub datacenter: String, - pub name: String, - 
pub hardware: String, - pub vlan_ip: Option, - pub tags: Vec, - pub firewall_inbound: Vec, -} +use crate::{ + types::FirewallPreset, + util::{ + client::{ApiErrorResponse, Client}, + generate_password, + }, +}; #[derive(Deserialize)] struct CreateSshKeyResponse { @@ -79,7 +75,10 @@ pub struct InstanceSpec { pub async fn create_instance( client: &Client, - server: &ProvisionCtx, + name: &str, + datacenter: &str, + hardware: &str, + tags: &[String], ssh_key: &str, ) -> GlobalResult { let ns = util::env::namespace(); @@ -90,12 +89,12 @@ pub async fn create_instance( .post( "/linode/instances", json!({ - "label": server.name, + "label": name, "group": ns, - "region": server.datacenter, - "type": server.hardware, + "region": datacenter, + "type": hardware, "authorized_keys": vec![ssh_key], - "tags": server.tags, + "tags": tags, "private_ip": true, "backups_enabled": false, }), @@ -158,15 +157,16 @@ pub async fn create_disks( pub async fn create_instance_config( client: &Client, - server: &ProvisionCtx, + vlan_ip: Option<&Ipv4Addr>, linode_id: u64, - disks: &CreateDisksResponse, + boot_disk_id: u64, + swap_disk_id: u64, ) -> GlobalResult<()> { tracing::info!("creating instance config"); let ns = util::env::namespace(); - let interfaces = if let Some(vlan_ip) = &server.vlan_ip { + let interfaces = if let Some(vlan_ip) = vlan_ip { let region_vlan = util::net::region::vlan_ip_net(); let ipam_address = format!("{}/{}", vlan_ip, region_vlan.prefix_len()); @@ -196,10 +196,10 @@ pub async fn create_instance_config( "root_device": "/dev/sda", "devices": { "sda": { - "disk_id": disks.boot_id, + "disk_id": boot_disk_id, }, "sdb": { - "disk_id": disks.swap_id, + "disk_id": swap_disk_id }, }, "interfaces": interfaces, @@ -215,15 +215,16 @@ pub struct CreateFirewallResponse { pub async fn create_firewall( client: &Client, - server: &ProvisionCtx, + firewall: &FirewallPreset, + tags: &[String], linode_id: u64, ) -> GlobalResult { tracing::info!("creating firewall"); let ns = util::env::namespace(); - let firewall_inbound = server - .firewall_inbound + let firewall_inbound = firewall + .rules() .iter() .map(|rule| { json!({ @@ -254,7 +255,7 @@ pub async fn create_firewall( "devices": { "linodes": [linode_id], }, - "tags": server.tags, + "tags": tags, }), ) .await @@ -364,7 +365,7 @@ pub async fn get_public_ip(client: &Client, linode_id: u64) -> GlobalResult GlobalResult<()> { +pub async fn delete_ssh_key(client: &Client, ssh_key_id: u64) -> GlobalResult<()> { tracing::info!("deleting linode ssh key"); client @@ -372,7 +373,7 @@ pub async fn delete_ssh_key(client: &Client, ssh_key_id: i64) -> GlobalResult<() .await } -pub async fn delete_instance(client: &Client, linode_id: i64) -> GlobalResult<()> { +pub async fn delete_instance(client: &Client, linode_id: u64) -> GlobalResult<()> { tracing::info!(?linode_id, "deleting linode instance"); client @@ -380,7 +381,7 @@ pub async fn delete_instance(client: &Client, linode_id: i64) -> GlobalResult<() .await } -pub async fn delete_firewall(client: &Client, firewall_id: i64) -> GlobalResult<()> { +pub async fn delete_firewall(client: &Client, firewall_id: u64) -> GlobalResult<()> { tracing::info!("deleting firewall"); client @@ -388,7 +389,7 @@ pub async fn delete_firewall(client: &Client, firewall_id: i64) -> GlobalResult< .await } -pub async fn shut_down(client: &Client, linode_id: i64) -> GlobalResult<()> { +pub async fn shut_down(client: &Client, linode_id: u64) -> GlobalResult<()> { tracing::info!("shutting down instance"); client @@ -446,10 +447,17 @@ pub struct 
CustomImage { pub async fn list_custom_images(client: &Client) -> GlobalResult> { tracing::info!("listing custom images"); + let ns = util::env::namespace(); + let req = client .inner() .get("https://api.linode.com/v4/images") - .query(&[("page_size", CUSTOM_IMAGE_LIST_SIZE)]); + .query(&[("page_size", CUSTOM_IMAGE_LIST_SIZE)]) + // Filter this namespace only + .header( + "X-Filter", + format!(r#"{{ "label": {{ "+contains": "{ns}-" }} }}"#), + ); let res = client .request(req, None, false) @@ -485,9 +493,9 @@ pub struct InstanceType { pub network_out: u64, } -impl From for linode::instance_type_get::response::InstanceType { +impl From for crate::types::InstanceType { fn from(value: InstanceType) -> Self { - linode::instance_type_get::response::InstanceType { + crate::types::InstanceType { hardware_id: value.id, memory: value.memory, disk: value.disk, diff --git a/svc/pkg/linode/util/src/lib.rs b/svc/pkg/linode/src/util/client.rs similarity index 93% rename from svc/pkg/linode/util/src/lib.rs rename to svc/pkg/linode/src/util/client.rs index 9ee833285..51900cf5e 100644 --- a/svc/pkg/linode/util/src/lib.rs +++ b/svc/pkg/linode/src/util/client.rs @@ -1,13 +1,9 @@ use std::{fmt, time::Duration}; -use rand::{distributions::Alphanumeric, Rng}; +use chirp_workflow::prelude::*; use reqwest::header; -use rivet_operation::prelude::*; use serde::{de::DeserializeOwned, Deserialize}; -pub mod api; -pub mod consts; - #[derive(Clone)] pub struct Client { // Safe to clone, has inner Arc @@ -201,12 +197,3 @@ struct ApiError { field: Option, reason: String, } - -/// Generates a random string for a secret. -pub(crate) fn generate_password(length: usize) -> String { - rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(length) - .map(char::from) - .collect() -} diff --git a/svc/pkg/linode/util/src/consts.rs b/svc/pkg/linode/src/util/consts.rs similarity index 100% rename from svc/pkg/linode/util/src/consts.rs rename to svc/pkg/linode/src/util/consts.rs diff --git a/svc/pkg/cluster/util/src/lib.rs b/svc/pkg/linode/src/util/mod.rs similarity index 53% rename from svc/pkg/cluster/util/src/lib.rs rename to svc/pkg/linode/src/util/mod.rs index 16817b893..4f44d66e7 100644 --- a/svc/pkg/cluster/util/src/lib.rs +++ b/svc/pkg/linode/src/util/mod.rs @@ -1,12 +1,8 @@ -use types::rivet::backend::{self, pkg::*}; -use uuid::Uuid; +use rand::{distributions::Alphanumeric, Rng}; -pub mod metrics; -pub mod test; - -// Use the hash of the server install script in the image variant so that if the install scripts are updated -// we won't be using the old image anymore -pub const INSTALL_SCRIPT_HASH: &str = include_str!(concat!(env!("OUT_DIR"), "/hash.txt")); +pub mod api; +pub mod client; +pub mod consts; // NOTE: We don't reserve CPU because Nomad is running as a higher priority process than the rest and // shouldn't be doing much heavy lifting. @@ -17,9 +13,6 @@ const RESERVE_MEMORY: u64 = RESERVE_SYSTEM_MEMORY + RESERVE_LB_MEMORY; const CPU_PER_CORE: u64 = 1999; -// TTL of the token written to prebake images. Prebake images are renewed before the token would expire -pub const SERVER_TOKEN_TTL: i64 = rivet_util::duration::days(30 * 6); - /// Provider agnostic hardware specs. 
#[derive(Debug)] pub struct JobNodeConfig { @@ -31,9 +24,7 @@ pub struct JobNodeConfig { } impl JobNodeConfig { - pub fn from_linode( - instance_type: &linode::instance_type_get::response::InstanceType, - ) -> JobNodeConfig { + pub fn from_linode(instance_type: &crate::types::InstanceType) -> JobNodeConfig { // Account for kernel memory overhead // https://www.linode.com/community/questions/17791/why-doesnt-free-m-match-the-full-amount-of-ram-of-my-nanode-plan let memory = instance_type.memory * 96 / 100; @@ -66,22 +57,11 @@ impl JobNodeConfig { } } -// Cluster id for provisioning servers -pub fn default_cluster_id() -> Uuid { - Uuid::nil() -} - -pub fn server_name( - provider_datacenter_id: &str, - pool_type: backend::cluster::PoolType, - server_id: Uuid, -) -> String { - let ns = rivet_util::env::namespace(); - let pool_type_str = match pool_type { - backend::cluster::PoolType::Job => "job", - backend::cluster::PoolType::Gg => "gg", - backend::cluster::PoolType::Ats => "ats", - }; - - format!("{ns}-{provider_datacenter_id}-{pool_type_str}-{server_id}",) +/// Generates a random string for a secret. +pub(crate) fn generate_password(length: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect() } diff --git a/svc/pkg/linode/src/workflows/image.rs b/svc/pkg/linode/src/workflows/image.rs new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/linode/src/workflows/mod.rs b/svc/pkg/linode/src/workflows/mod.rs new file mode 100644 index 000000000..74f47ad34 --- /dev/null +++ b/svc/pkg/linode/src/workflows/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/svc/pkg/linode/worker/src/workers/prebake_install_complete.rs b/svc/pkg/linode/src/workflows/prebake_install_complete.rs similarity index 100% rename from svc/pkg/linode/worker/src/workers/prebake_install_complete.rs rename to svc/pkg/linode/src/workflows/prebake_install_complete.rs diff --git a/svc/pkg/linode/worker/src/workers/prebake_provision.rs b/svc/pkg/linode/src/workflows/prebake_provision.rs similarity index 100% rename from svc/pkg/linode/worker/src/workers/prebake_provision.rs rename to svc/pkg/linode/src/workflows/prebake_provision.rs diff --git a/svc/pkg/linode/src/workflows/server.rs b/svc/pkg/linode/src/workflows/server.rs new file mode 100644 index 000000000..3b855ae81 --- /dev/null +++ b/svc/pkg/linode/src/workflows/server.rs @@ -0,0 +1,493 @@ +use std::net::Ipv4Addr; + +use chirp_workflow::prelude::*; +use serde_json::json; + +use crate::{ + types::FirewallPreset, + util::{api, client}, +}; + +const DEFAULT_IMAGE: &str = "linode/debian11"; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Input { + pub server_id: Uuid, + pub provider_datacenter_id: String, + pub custom_image: Option, + pub hardware: String, + pub api_token: Option, + pub firewall_preset: FirewallPreset, + pub vlan_ip: Ipv4Addr, + pub tags: Vec, +} + +#[workflow] +pub async fn linode_server(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + let is_test = input.tags.iter().any(|tag| tag == "test"); + let ns = util::env::namespace(); + // Linode label must be 3-64 characters, UUID's are 36 + let name = format!("{ns}-{}", input.server_id); + + let tags = input + .tags + .iter() + .cloned() + .chain([ + // HACK: Linode requires tags to be > 3 characters. We extend the namespace to make sure it + // meets the minimum length requirement. 
+ format!("rivet-{ns}"), + format!("{ns}-{}", input.provider_datacenter_id), + format!("{ns}-{}", input.firewall_preset), + format!( + "{ns}-{}-{}", + input.provider_datacenter_id, input.firewall_preset + ), + ]) + .collect::>(); + + let ssh_key_res = ctx + .activity(CreateSshKeyInput { + server_id: input.server_id, + api_token: input.api_token.clone(), + is_test, + }) + .await?; + + let create_instance_res = ctx + .activity(CreateInstanceInput { + server_id: input.server_id, + api_token: input.api_token.clone(), + ssh_public_key: ssh_key_res.public_key.clone(), + name, + datacenter: input.provider_datacenter_id.clone(), + hardware: input.hardware.clone(), + tags: tags.clone(), + }) + .await?; + + ctx.activity(WaitInstanceReadyInput { + api_token: input.api_token.clone(), + linode_id: create_instance_res.linode_id, + }) + .await?; + + // let image_res = ctx + // .activity(GetImageInput { + // api_token: input.api_token.clone(), + // datacenter_id: input.datacenter_id, + // pool_type: input.pool_type, + // }) + // .await?; + + // // Start custom image creation process + // if image_res.updated { + // msg!([ctx] linode::msg::prebake_provision(opts.datacenter_id, opts.pool_type as i32) { + // datacenter_id: ctx.datacenter_id, + // pool_type: opts.pool_type as i32, + // provider_datacenter_id: opts.provider_datacenter_id.to_string(), + // tags: Vec::new(), + // }) + // .await?; + // } + + let disks_res = ctx + .activity(CreateDisksInput { + api_token: input.api_token.clone(), + image: input + .custom_image + .clone() + .unwrap_or_else(|| DEFAULT_IMAGE.to_string()), + ssh_public_key: ssh_key_res.public_key.clone(), + linode_id: create_instance_res.linode_id, + disk_size: create_instance_res.server_disk_size, + }) + .await?; + + ctx.activity(CreateInstanceConfigInput { + api_token: input.api_token.clone(), + vlan_ip: input.vlan_ip, + linode_id: create_instance_res.linode_id, + disks: disks_res, + }) + .await?; + + let firewall_id = ctx + .activity(CreateFirewallInput { + server_id: input.server_id, + api_token: input.api_token.clone(), + firewall_preset: input.firewall_preset.clone(), + tags, + linode_id: create_instance_res.linode_id, + }) + .await?; + + ctx.activity(BootInstanceInput { + api_token: input.api_token.clone(), + linode_id: create_instance_res.linode_id, + }) + .await?; + + let public_ip = ctx + .activity(GetPublicIpInput { + api_token: input.api_token.clone(), + linode_id: create_instance_res.linode_id, + }) + .await?; + + ctx.tagged_signal( + &json!({ + "server_id": input.server_id, + }), + ProvisionComplete { + linode_id: create_instance_res.linode_id, + public_ip, + }, + ) + .await?; + + ctx.listen::().await?; + + ctx.activity(DestroyInstanceInput { + api_token: input.api_token.clone(), + ssh_key_id: ssh_key_res.ssh_key_id, + linode_id: create_instance_res.linode_id, + firewall_id, + }) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateSshKeyInput { + server_id: Uuid, + api_token: Option, + is_test: bool, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateSshKeyOutput { + ssh_key_id: u64, + public_key: String, +} + +#[activity(CreateSshKey)] +async fn create_ssh_key( + ctx: &ActivityCtx, + input: &CreateSshKeyInput, +) -> GlobalResult { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + let ns = util::env::namespace(); + + let ssh_key_label = format!("{ns}-{}", input.server_id); + let ssh_key_res = api::create_ssh_key(&client, &ssh_key_label, input.is_test).await?; + + 
Ok(CreateSshKeyOutput { + ssh_key_id: ssh_key_res.id, + public_key: ssh_key_res.public_key, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateInstanceInput { + server_id: Uuid, + api_token: Option, + ssh_public_key: String, + name: String, + datacenter: String, + hardware: String, + tags: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateInstanceOutput { + linode_id: u64, + server_disk_size: u64, +} + +#[activity(CreateInstance)] +async fn create_instance( + ctx: &ActivityCtx, + input: &CreateInstanceInput, +) -> GlobalResult { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + let create_instance_res = api::create_instance( + &client, + &input.name, + &input.datacenter, + &input.hardware, + &input.tags, + &input.ssh_public_key, + ) + .await?; + let linode_id = create_instance_res.id; + + Ok(CreateInstanceOutput { + linode_id, + server_disk_size: create_instance_res.specs.disk, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct WaitInstanceReadyInput { + api_token: Option, + linode_id: u64, +} + +#[activity(WaitInstanceReady)] +async fn wait_instance_ready( + ctx: &ActivityCtx, + input: &WaitInstanceReadyInput, +) -> GlobalResult<()> { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + api::wait_instance_ready(&client, input.linode_id).await +} + +// #[derive(Debug, Serialize, Deserialize, Hash)] +// struct GetImageInput { +// server_id: Uuid, +// api_token: Option, +// provider_datacenter_id: String, +// datacenter_id: Uuid, +// pool_type: PoolType, +// ssh_key: String, +// linode_id: u64, +// server_disk_size: u64, +// } + +// #[derive(Debug, Serialize, Deserialize, Hash)] +// struct GetImageOutput { +// custom_image: Option, +// updated: bool, +// } + +// #[activity(GetImage)] +// async fn get_image(ctx: &ActivityCtx, input: &GetImageInput) -> GlobalResult { +// // Try to get custom image (if exists) +// let (custom_image, updated) = if input.use_prebakes { +// let provider = Provider::Linode; + +// // Get the custom image id for this server, or insert a record and start creating one +// let (image_id, updated) = sql_fetch_one!( +// [ctx, (Option, bool)] +// " +// WITH +// updated AS ( +// INSERT INTO db_cluster.server_images2 AS s ( +// provider, install_hash, datacenter_id, pool_type, create_ts +// ) +// VALUES ($1, $2, $3, $4, $5) +// ON CONFLICT (provider, install_hash, datacenter_id, pool_type) DO UPDATE +// SET +// provider_image_id = NULL, +// create_ts = $5 +// WHERE s.create_ts < $6 +// RETURNING provider, install_hash, datacenter_id, pool_type +// ), +// selected AS ( +// SELECT provider, install_hash, datacenter_id, pool_type, provider_image_id +// FROM db_cluster.server_images2 +// WHERE +// provider = $1 AND +// install_hash = $2 AND +// datacenter_id = $3 AND +// pool_type = $4 +// ) +// SELECT +// selected.provider_image_id, +// -- Primary key is not null +// (updated.provider IS NOT NULL) AS updated +// FROM selected +// FULL OUTER JOIN updated +// ON true +// ", +// provider as i64, +// crate::util::INSTALL_SCRIPT_HASH, +// input.datacenter_id, +// input.pool_type as i64, +// util::timestamp::now(), +// // 5 month expiration +// util::timestamp::now() - util::duration::days(5 * 30), +// ) +// .await?; + +// // Updated is true if this specific sql call either reset (if expired) or inserted the row +// Ok((if updated { None } else { image_id }, updated)) +// } else { +// Ok((None, false)) +// }; + +// // Default image +// let 
used_custom_image = custom_image.is_some(); +// let image = if let Some(custom_image) = custom_image { +// tracing::info!("using custom image {}", custom_image); + +// custom_image +// } else { +// tracing::info!("custom image not ready yet, continuing normally"); + +// "linode/debian11".to_string() +// }; + +// Ok(GetImageOutput { image, updated }) +// } + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateDisksInput { + api_token: Option, + image: String, + ssh_public_key: String, + linode_id: u64, + disk_size: u64, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateDisksOutput { + boot_id: u64, + swap_id: u64, +} + +#[activity(CreateDisks)] +async fn create_disks( + ctx: &ActivityCtx, + input: &CreateDisksInput, +) -> GlobalResult { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + let create_disks_res = api::create_disks( + &client, + &input.ssh_public_key, + input.linode_id, + &input.image, + input.disk_size, + ) + .await?; + + Ok(CreateDisksOutput { + boot_id: create_disks_res.boot_id, + swap_id: create_disks_res.swap_id, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateInstanceConfigInput { + api_token: Option, + vlan_ip: Ipv4Addr, + linode_id: u64, + disks: CreateDisksOutput, +} + +#[activity(CreateInstanceConfig)] +async fn create_instance_config( + ctx: &ActivityCtx, + input: &CreateInstanceConfigInput, +) -> GlobalResult<()> { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + api::create_instance_config( + &client, + Some(&input.vlan_ip), + input.linode_id, + input.disks.boot_id, + input.disks.swap_id, + ) + .await +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateFirewallInput { + server_id: Uuid, + api_token: Option, + firewall_preset: FirewallPreset, + tags: Vec, + linode_id: u64, +} + +#[activity(CreateFirewall)] +async fn create_firewall(ctx: &ActivityCtx, input: &CreateFirewallInput) -> GlobalResult { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + let firewall_res = api::create_firewall( + &client, + &input.firewall_preset, + &input.tags, + input.linode_id, + ) + .await?; + + Ok(firewall_res.id) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct BootInstanceInput { + api_token: Option, + linode_id: u64, +} + +#[activity(BootInstance)] +async fn boot_instance(ctx: &ActivityCtx, input: &BootInstanceInput) -> GlobalResult<()> { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + api::boot_instance(&client, input.linode_id).await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GetPublicIpInput { + api_token: Option, + linode_id: u64, +} + +#[activity(GetPublicIp)] +async fn get_public_ip(ctx: &ActivityCtx, input: &GetPublicIpInput) -> GlobalResult { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + api::get_public_ip(&client, input.linode_id).await +} + +#[signal("linode-server-provision-complete")] +pub struct ProvisionComplete { + pub linode_id: u64, + pub public_ip: Ipv4Addr, +} + +#[signal("linode-server-provision-failed")] +pub struct ProvisionFailed { + pub err: String, +} + +#[signal("linode-server-destroy")] +pub struct Destroy {} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct DestroyInstanceInput { + api_token: Option, + linode_id: u64, + ssh_key_id: u64, + firewall_id: u64, +} + +#[activity(DestroyInstance)] +async fn 
destroy_instance(ctx: &ActivityCtx, input: &DestroyInstanceInput) -> GlobalResult<()> { + // Build HTTP client + let client = client::Client::new(input.api_token.clone()).await?; + + api::delete_instance(&client, input.linode_id).await?; + api::delete_ssh_key(&client, input.ssh_key_id).await?; + api::delete_firewall(&client, input.firewall_id).await?; + + Ok(()) +} diff --git a/svc/pkg/linode/standalone/gc/Cargo.toml b/svc/pkg/linode/standalone/gc/Cargo.toml index 4a2cd1ce5..3a775d3c2 100644 --- a/svc/pkg/linode/standalone/gc/Cargo.toml +++ b/svc/pkg/linode/standalone/gc/Cargo.toml @@ -19,8 +19,8 @@ serde_json = "1.0" tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } -util-linode = { package = "rivet-util-linode", path = "../../util" } + +linode = { path = "../.." } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/linode/ops/instance-type-get/tests/integration.rs b/svc/pkg/linode/tests/instance_type_get.rs similarity index 100% rename from svc/pkg/linode/ops/instance-type-get/tests/integration.rs rename to svc/pkg/linode/tests/instance_type_get.rs diff --git a/svc/pkg/linode/worker/tests/prebake_install_complete.rs b/svc/pkg/linode/tests/prebake_install_complete.rs similarity index 100% rename from svc/pkg/linode/worker/tests/prebake_install_complete.rs rename to svc/pkg/linode/tests/prebake_install_complete.rs diff --git a/svc/pkg/linode/worker/tests/prebake_provision.rs b/svc/pkg/linode/tests/prebake_provision.rs similarity index 100% rename from svc/pkg/linode/worker/tests/prebake_provision.rs rename to svc/pkg/linode/tests/prebake_provision.rs diff --git a/svc/pkg/linode/ops/server-destroy/tests/integration.rs b/svc/pkg/linode/tests/server_destroy.rs similarity index 100% rename from svc/pkg/linode/ops/server-destroy/tests/integration.rs rename to svc/pkg/linode/tests/server_destroy.rs diff --git a/svc/pkg/linode/ops/server-provision/tests/integration.rs b/svc/pkg/linode/tests/server_provision.rs similarity index 100% rename from svc/pkg/linode/ops/server-provision/tests/integration.rs rename to svc/pkg/linode/tests/server_provision.rs diff --git a/svc/pkg/linode/util/Cargo.toml b/svc/pkg/linode/util/Cargo.toml deleted file mode 100644 index 4f385e77d..000000000 --- a/svc/pkg/linode/util/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "rivet-util-linode" -version = "0.1.0" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chrono = "0.4" -rand = "0.8" -reqwest = { version = "0.11", features = ["json"] } -rivet-operation = { path = "../../../../lib/operation/core" } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -ssh-key = "0.6.3" diff --git a/svc/pkg/linode/worker/Cargo.toml b/svc/pkg/linode/worker/Cargo.toml index 4ac5b98f4..ced034a61 100644 --- a/svc/pkg/linode/worker/Cargo.toml +++ b/svc/pkg/linode/worker/Cargo.toml @@ -12,10 +12,9 @@ chirp-worker = { path = "../../../../lib/chirp/worker" } rivet-health-checks = { path = "../../../../lib/health-checks" } rivet-metrics = { path = "../../../../lib/metrics" } rivet-runtime = { path = "../../../../lib/runtime" } -util-cluster = { package = "rivet-util-cluster", path = "../../cluster/util" } util-linode = { package = "rivet-util-linode", path = "../util" } -cluster-datacenter-get = { path = "../../cluster/ops/datacenter-get" } 
+cluster = { path = "../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/linode/worker/src/lib.rs b/svc/pkg/linode/worker/src/lib.rs deleted file mode 100644 index 3719b10aa..000000000 --- a/svc/pkg/linode/worker/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod workers; diff --git a/svc/pkg/linode/worker/src/workers/mod.rs b/svc/pkg/linode/worker/src/workers/mod.rs deleted file mode 100644 index d54e70dc6..000000000 --- a/svc/pkg/linode/worker/src/workers/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod prebake_install_complete; -pub mod prebake_provision; - -chirp_worker::workers![prebake_install_complete, prebake_provision,]; diff --git a/svc/pkg/monolith/standalone/worker/Cargo.toml b/svc/pkg/monolith/standalone/worker/Cargo.toml index 047dd4ead..5e026e03a 100644 --- a/svc/pkg/monolith/standalone/worker/Cargo.toml +++ b/svc/pkg/monolith/standalone/worker/Cargo.toml @@ -23,13 +23,13 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ cdn-worker = { path = "../../../cdn/worker" } cf-custom-hostname-worker = { path = "../../../cf-custom-hostname/worker" } cloud-worker = { path = "../../../cloud/worker" } -cluster-worker = { path = "../../../cluster/worker" } +cluster = { path = "../../../cluster" } external-worker = { path = "../../../external/worker" } game-user-worker = { path = "../../../game-user/worker" } job-log-worker = { path = "../../../job-log/worker" } job-run-worker = { path = "../../../job-run/worker" } kv-worker = { path = "../../../kv/worker" } -linode-worker = { path = "../../../linode/worker" } +linode = { path = "../../../linode" } mm-worker = { path = "../../../mm/worker" } team-invite-worker = { path = "../../../team-invite/worker" } team-worker = { path = "../../../team/worker" } diff --git a/svc/pkg/region/ops/get/Cargo.toml b/svc/pkg/region/ops/get/Cargo.toml index 2520092cc..824356593 100644 --- a/svc/pkg/region/ops/get/Cargo.toml +++ b/svc/pkg/region/ops/get/Cargo.toml @@ -10,8 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } -cluster-datacenter-location-get = { path = "../../../cluster/ops/datacenter-location-get" } +cluster = { path = "../../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/region/ops/list-for-game/Cargo.toml b/svc/pkg/region/ops/list-for-game/Cargo.toml index a7b72c1dc..87c107aef 100644 --- a/svc/pkg/region/ops/list-for-game/Cargo.toml +++ b/svc/pkg/region/ops/list-for-game/Cargo.toml @@ -10,8 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -cluster-get-for-game = { path = "../../../cluster/ops/get-for-game" } -cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } +cluster = { path = "../../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/region/ops/list/Cargo.toml b/svc/pkg/region/ops/list/Cargo.toml index 7be1a8d5a..5debef519 100644 --- a/svc/pkg/region/ops/list/Cargo.toml +++ b/svc/pkg/region/ops/list/Cargo.toml @@ -9,9 +9,8 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } 
-cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } +cluster = { path = "../../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/region/ops/resolve-for-game/Cargo.toml b/svc/pkg/region/ops/resolve-for-game/Cargo.toml index 59548df97..49602f8be 100644 --- a/svc/pkg/region/ops/resolve-for-game/Cargo.toml +++ b/svc/pkg/region/ops/resolve-for-game/Cargo.toml @@ -10,7 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } +cluster = { path = "../../../cluster" } region-list-for-game = { path = "../list-for-game" } [dependencies.sqlx] diff --git a/svc/pkg/region/ops/resolve/Cargo.toml b/svc/pkg/region/ops/resolve/Cargo.toml index 6dbcda2f7..07ac553d3 100644 --- a/svc/pkg/region/ops/resolve/Cargo.toml +++ b/svc/pkg/region/ops/resolve/Cargo.toml @@ -10,7 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } +cluster = { path = "../../../cluster" } region-list = { path = "../list" } [dependencies.sqlx] diff --git a/svc/pkg/tier/ops/list/Cargo.toml b/svc/pkg/tier/ops/list/Cargo.toml index c335edf0b..fdb64f282 100644 --- a/svc/pkg/tier/ops/list/Cargo.toml +++ b/svc/pkg/tier/ops/list/Cargo.toml @@ -9,12 +9,9 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } -linode-instance-type-get = { path = "../../../linode/ops/instance-type-get" } +cluster = { path = "../../../cluster" } +linode = { path = "../../../linode" } [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } - -cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" }
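These dependency swaps are the consumer side of the consolidation: crates such as `tier-list` now link the `cluster` and `linode` packages directly instead of one crate per operation. As a rough usage sketch (not code from this commit), a call site constructs the op's `Input` from `svc/pkg/linode/src/ops/instance_type_get.rs` and reads its `Output`; the `ctx.op(...)` helper and the example hardware id are assumptions about the chirp-workflow calling convention.

    // Hypothetical call site inside a chirp-workflow activity. Input/Output come
    // from linode::ops::instance_type_get in this patch; `ctx.op(...)` is an
    // assumed helper on the activity context.
    use chirp_workflow::prelude::*;

    async fn fetch_hardware_specs(ctx: &ActivityCtx) -> GlobalResult<()> {
        let res = ctx
            .op(linode::ops::instance_type_get::Input {
                hardware_ids: vec!["g6-nanode-1".to_string()], // example hardware id
            })
            .await?;

        for ty in &res.instance_types {
            // InstanceType is the serde type defined in svc/pkg/linode/src/types.rs.
            tracing::info!(
                hardware_id = %ty.hardware_id,
                memory = ty.memory,
                vcpus = ty.vcpus,
                "linode instance type"
            );
        }

        Ok(())
    }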