diff --git a/lib/bolt/core/src/context/service.rs b/lib/bolt/core/src/context/service.rs
index d4e5d4c99d..e32c35d940 100644
--- a/lib/bolt/core/src/context/service.rs
+++ b/lib/bolt/core/src/context/service.rs
@@ -510,33 +510,35 @@ impl ServiceContextData {
 			);
 		}
 
-		let can_depend =
-			if self.is_monolith_worker() {
-				matches!(
-					dep.config().kind,
-					ServiceKind::Database { .. }
-						| ServiceKind::Cache { .. } | ServiceKind::Operation { .. }
-						| ServiceKind::Package { .. }
-						| ServiceKind::Consumer { .. }
-				)
-			} else if matches!(self.config().kind, ServiceKind::Api { .. }) {
-				matches!(
-					dep.config().kind,
-					ServiceKind::Database { .. }
-						| ServiceKind::Cache { .. } | ServiceKind::Operation { .. }
-						| ServiceKind::Package { .. }
-						| ServiceKind::ApiRoutes { .. }
-						| ServiceKind::Consumer { .. }
-				)
-			} else {
-				matches!(
-					dep.config().kind,
-					ServiceKind::Database { .. }
-						| ServiceKind::Cache { .. } | ServiceKind::Operation { .. }
-						| ServiceKind::Package { .. }
-						| ServiceKind::Consumer { .. }
-				)
-			};
+		let can_depend = if self.is_monolith_worker() {
+			matches!(
+				dep.config().kind,
+				ServiceKind::Database { .. }
+					| ServiceKind::Cache { .. }
+					| ServiceKind::Operation { .. }
+					| ServiceKind::Package { .. }
+					| ServiceKind::Consumer { .. }
+			)
+		} else if matches!(self.config().kind, ServiceKind::Api { .. }) {
+			matches!(
+				dep.config().kind,
+				ServiceKind::Database { .. }
+					| ServiceKind::Cache { .. }
+					| ServiceKind::Operation { .. }
+					| ServiceKind::Package { .. }
+					| ServiceKind::ApiRoutes { .. }
+					| ServiceKind::Consumer { .. }
+			)
+		} else {
+			matches!(
+				dep.config().kind,
+				ServiceKind::Database { .. }
+					| ServiceKind::Cache { .. }
+					| ServiceKind::Operation { .. }
+					| ServiceKind::Package { .. }
+					| ServiceKind::Consumer { .. }
+			)
+		};
 
 		if !can_depend {
 			panic!(
diff --git a/svc/pkg/cluster/src/workflows/datacenter/scale.rs b/svc/pkg/cluster/src/workflows/datacenter/scale.rs
index 76642317e8..ed533ebac2 100644
--- a/svc/pkg/cluster/src/workflows/datacenter/scale.rs
+++ b/svc/pkg/cluster/src/workflows/datacenter/scale.rs
@@ -246,9 +246,12 @@ async fn inner(
 		.map(TryInto::try_into)
 		.collect::<GlobalResult<Vec<Server>>>()?;
 
-	// Sort job servers by memory usage
+	// Sort job servers by allocated memory
 	servers.sort_by_key(|server| memory_by_server.get(&server.server_id));
 
+	// TODO: remove
+	tracing::info!(server_ids=?servers.iter().map(|s| s.server_id).collect::<Vec<_>>(), ?memory_by_server, "server topo");
+
 	// TODO: RVT-3732 Sort gg and ats servers by cpu usage
 	// servers.sort_by_key
 
@@ -388,7 +391,6 @@ async fn scale_down_job_servers(
 
 	let drain_candidates = nomad_servers
 		.iter()
-		.rev()
 		.take(drain_count)
 		.map(|server| server.server_id);
 
@@ -420,7 +422,6 @@ async fn scale_down_gg_servers<'a, I: Iterator<Item = &'a Server> + DoubleEndedI
 	tracing::info!(count=%drain_count, "draining gg servers");
 
 	let drain_candidates = installed_servers
-		.rev()
 		.take(drain_count)
 		.map(|server| server.server_id);
 
@@ -455,7 +456,6 @@ async fn scale_down_ats_servers<
 	tracing::info!(count=%drain_count, "draining ats servers");
 
 	let drain_candidates = installed_servers
-		.rev()
 		.take(drain_count)
 		.map(|server| server.server_id);
 
diff --git a/svc/pkg/mm/worker/src/workers/lobby_create/nomad_job.rs b/svc/pkg/mm/worker/src/workers/lobby_create/nomad_job.rs
index cce6323d8f..ab2e1a6020 100644
--- a/svc/pkg/mm/worker/src/workers/lobby_create/nomad_job.rs
+++ b/svc/pkg/mm/worker/src/workers/lobby_create/nomad_job.rs
@@ -94,7 +94,7 @@ pub fn gen_lobby_docker_job(
 		// Nomad configures CPU based on MHz, not millicores. We have to calculate the CPU share
 		// by knowing how many MHz are on the client.
 		CPU: if tier.rivet_cores_numerator < tier.rivet_cores_denominator {
-			Some((tier.cpu - util_job::TASK_CLEANUP_CPU as u64).try_into()?)
+			Some(tier.cpu.try_into()?)
 		} else {
 			None
 		},
@@ -103,18 +103,10 @@ pub fn gen_lobby_docker_job(
 		} else {
 			None
 		},
-		memory_mb: Some(
-			(TryInto::<i64>::try_into(memory)? / (1024 * 1024)
-				- util_job::TASK_CLEANUP_MEMORY as i64)
-				.try_into()?,
-		),
+		memory_mb: Some(tier.memory.try_into()?),
 		// Allow oversubscribing memory by 50% of the reserved
 		// memory if using less than the node's total memory
-		memory_max_mb: Some(
-			(TryInto::<i64>::try_into(memory_max)? / (1024 * 1024)
-				- util_job::TASK_CLEANUP_MEMORY as i64)
-				.try_into()?,
-		),
+		memory_max_mb: Some(tier.memory_max.try_into()?),
 		disk_mb: Some(tier.disk as i32), // TODO: Is this deprecated?
 		..Resources::new()
 	};
diff --git a/svc/pkg/tier/ops/list/src/lib.rs b/svc/pkg/tier/ops/list/src/lib.rs
index 02636671b2..f51dbc1e2a 100644
--- a/svc/pkg/tier/ops/list/src/lib.rs
+++ b/svc/pkg/tier/ops/list/src/lib.rs
@@ -53,16 +53,7 @@ async fn handle(ctx: OperationContext<tier::list::Request>) -> GlobalResult