From b1f9749105739c8fed14f9dfdfe72215701e78c0 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Mon, 3 Oct 2022 19:26:18 -0400
Subject: [PATCH 01/80] First pass of resource utilization

---
 common/src/sql/dbinit.sql                     |  15 ++
 nexus/db-model/src/lib.rs                     |   2 +
 nexus/db-model/src/queries/mod.rs             |   1 +
 .../src/queries/resource_usage_update.rs      |  28 ++++
 nexus/db-model/src/region.rs                  |   5 +
 nexus/db-model/src/resource_usage.rs          |  21 +++
 nexus/db-model/src/schema.rs                  |   7 +
 nexus/src/app/disk.rs                         |  24 +--
 nexus/src/app/project.rs                      |  11 +-
 nexus/src/app/sagas/disk_create.rs            |  89 +++++++++++-
 nexus/src/app/sagas/disk_delete.rs            |   8 +-
 nexus/src/app/sagas/snapshot_create.rs        |   7 +-
 nexus/src/app/sagas/volume_delete.rs          |  22 +++
 nexus/src/app/volume.rs                       |  10 +-
 nexus/src/db/datastore/mod.rs                 |   1 +
 nexus/src/db/datastore/organization.rs        |  14 +-
 nexus/src/db/datastore/project.rs             |  15 +-
 nexus/src/db/datastore/resource_usage.rs      |  72 +++++++++
 nexus/src/db/datastore/silo.rs                |  17 ++-
 nexus/src/db/queries/mod.rs                   |   1 +
 nexus/src/db/queries/resource_usage_update.rs | 137 ++++++++++++++++++
 21 files changed, 480 insertions(+), 27 deletions(-)
 create mode 100644 nexus/db-model/src/queries/resource_usage_update.rs
 create mode 100644 nexus/db-model/src/resource_usage.rs
 create mode 100644 nexus/src/db/datastore/resource_usage.rs
 create mode 100644 nexus/src/db/queries/resource_usage_update.rs

diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql
index 198bb09a6f4..b12321020b7 100644
--- a/common/src/sql/dbinit.sql
+++ b/common/src/sql/dbinit.sql
@@ -128,6 +128,21 @@ CREATE INDEX ON omicron.public.service (
     sled_id
 );
 
+/*
+ * A table describing resource usage which may be associated
+ * with a collection of objects, including:
+ * - Projects
+ * - Organizations
+ * - Silos
+ * - Fleet
+ */
+CREATE TABLE omicron.public.resource_usage (
+    /* Should match the UUID of the corresponding collection */
+    id UUID PRIMARY KEY,
+
+    disk_bytes_used INT8 NOT NULL
+);
+
 /*
  * ZPools of Storage, attached to Sleds.
  * Typically these are backed by a single physical disk.

diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs
index 945a9651de5..d06f7b28ba4 100644
--- a/nexus/db-model/src/lib.rs
+++ b/nexus/db-model/src/lib.rs
@@ -46,6 +46,7 @@ pub mod queries;
 mod rack;
 mod region;
 mod region_snapshot;
+mod resource_usage;
 mod role_assignment;
 mod role_builtin;
 pub mod saga_types;
@@ -112,6 +113,7 @@ pub use project::*;
 pub use rack::*;
 pub use region::*;
 pub use region_snapshot::*;
+pub use resource_usage::*;
 pub use role_assignment::*;
 pub use role_builtin::*;
 pub use service::*;

diff --git a/nexus/db-model/src/queries/mod.rs b/nexus/db-model/src/queries/mod.rs
index 20c94b8285e..b3e867009d8 100644
--- a/nexus/db-model/src/queries/mod.rs
+++ b/nexus/db-model/src/queries/mod.rs
@@ -5,3 +5,4 @@
 //! Subqueries used in CTEs.
 
 pub mod region_allocation;
+pub mod resource_usage_update;

diff --git a/nexus/db-model/src/queries/resource_usage_update.rs b/nexus/db-model/src/queries/resource_usage_update.rs
new file mode 100644
index 00000000000..c7bf9199e3b
--- /dev/null
+++ b/nexus/db-model/src/queries/resource_usage_update.rs
@@ -0,0 +1,28 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Describes the resource usage update CTE
+
+use crate::schema::organization;
+use crate::schema::resource_usage;
+
+table! {
+    parent_org {
+        id -> Uuid,
+    }
+}
+
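+// NOTE: Neither `parent_org` nor `parent_silo` is a real table. As with the
+// `region_allocation` module above, these `table!` invocations just give the
+// CTE's subqueries named columns that Diesel can reference.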
+table! {
+    parent_silo {
+        id -> Uuid,
+    }
+}
+
+diesel::allow_tables_to_appear_in_same_query!(organization, parent_org,);
+
+diesel::allow_tables_to_appear_in_same_query!(
+    resource_usage,
+    parent_org,
+    parent_silo,
+);

diff --git a/nexus/db-model/src/region.rs b/nexus/db-model/src/region.rs
index 5fcbaddb4a9..349972ec510 100644
--- a/nexus/db-model/src/region.rs
+++ b/nexus/db-model/src/region.rs
@@ -75,4 +75,9 @@ impl Region {
         // external, customer-supplied keys is a non-requirement.
         true
     }
+
+    pub fn size_used(&self) -> i64 {
+        self.extent_count()
+            * self.blocks_per_extent()
+            * self.block_size().to_bytes() as i64
+    }
 }

diff --git a/nexus/db-model/src/resource_usage.rs b/nexus/db-model/src/resource_usage.rs
new file mode 100644
index 00000000000..a219db8fd2d
--- /dev/null
+++ b/nexus/db-model/src/resource_usage.rs
@@ -0,0 +1,21 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use crate::schema::resource_usage;
+use uuid::Uuid;
+
+/// Describes resource_usage for a collection
+#[derive(Selectable, Queryable, Insertable, Debug)]
+#[diesel(table_name = resource_usage)]
+pub struct ResourceUsage {
+    pub id: Uuid,
+
+    pub disk_bytes_used: i64,
+}
+
+impl ResourceUsage {
+    pub fn new(id: Uuid) -> Self {
+        Self { id, disk_bytes_used: 0 }
+    }
+}

diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index b2e30402e96..8e141a8425e 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -394,6 +394,13 @@ table! {
     }
 }
 
+table! {
+    resource_usage {
+        id -> Uuid,
+        disk_bytes_used -> Int8,
+    }
+}
+
 table! {
     zpool (id) {
         id -> Uuid,

diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs
index e26955f12fe..eeae9b1f5a3 100644
--- a/nexus/src/app/disk.rs
+++ b/nexus/src/app/disk.rs
@@ -377,15 +377,19 @@ impl super::Nexus {
         project_name: &Name,
         disk_name: &Name,
     ) -> DeleteResult {
-        let (.., authz_disk) = LookupPath::new(opctx, &self.db_datastore)
-            .organization_name(organization_name)
-            .project_name(project_name)
-            .disk_name(disk_name)
-            .lookup_for(authz::Action::Delete)
-            .await?;
+        let (.., project, authz_disk) =
+            LookupPath::new(opctx, &self.db_datastore)
+                .organization_name(organization_name)
+                .project_name(project_name)
+                .disk_name(disk_name)
+                .lookup_for(authz::Action::Delete)
+                .await?;
 
-        let saga_params =
-            sagas::disk_delete::Params { disk_id: authz_disk.id() };
+        let saga_params = sagas::disk_delete::Params {
+            serialized_authn: authn::saga::Serialized::for_opctx(opctx),
+            project_id: project.id(),
+            disk_id: authz_disk.id(),
+        };
         self.execute_saga::<sagas::disk_delete::SagaDiskDelete>(saga_params)
             .await?;
         Ok(())
@@ -498,7 +502,7 @@ impl super::Nexus {
         // reference counting for volumes, and probably means this needs to
         // instead be a saga.
-        let (.., authz_snapshot, db_snapshot) =
+        let (.., project, authz_snapshot, db_snapshot) =
             LookupPath::new(opctx, &self.db_datastore)
                 .organization_name(organization_name)
                 .project_name(project_name)
                 .snapshot_name(snapshot_name)
                 .fetch_for(authz::Action::Delete)
                 .await?;
 
         // Kick off volume deletion saga
-        self.volume_delete(db_snapshot.volume_id).await?;
+        self.volume_delete(opctx, project.id(), db_snapshot.volume_id).await?;
 
         Ok(())
     }

diff --git a/nexus/src/app/project.rs b/nexus/src/app/project.rs
index a76262ca219..03f4d1ff18e 100644
--- a/nexus/src/app/project.rs
+++ b/nexus/src/app/project.rs
@@ -35,6 +35,12 @@ impl super::Nexus {
             .lookup_for(authz::Action::CreateChild)
             .await?;
 
+        // TODO: We probably want to have "project creation", "resource usage
+        // creation", and "default VPC creation" co-located within a saga for
+        // atomicity.
+        //
+        // Until then, we just perform the operations sequentially.
+
         // Create a project.
         let db_project =
             db::model::Project::new(authz_org.id(), new_project.clone());
@@ -43,11 +49,6 @@ impl super::Nexus {
             .project_create(opctx, &authz_org, db_project)
             .await?;
 
-        // TODO: We probably want to have "project creation" and "default VPC
-        // creation" co-located within a saga for atomicity.
-        //
-        // Until then, we just perform the operations sequentially.
-
         // Create a default VPC associated with the project.
         // TODO-correctness We need to be using the project_id we just created.
         // project_create() should return authz::Project and we should use that

diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs
index c65f97d7331..4f496b21e43 100644
--- a/nexus/src/app/sagas/disk_create.rs
+++ b/nexus/src/app/sagas/disk_create.rs
@@ -51,8 +51,16 @@ lazy_static! {
         sdc_create_disk_record,
         sdc_create_disk_record_undo
     );
-    static ref REGIONS_ALLOC: NexusAction =
-        new_action_noop_undo("disk-create.regions-alloc", sdc_alloc_regions,);
+    static ref REGIONS_ALLOC: NexusAction = ActionFunc::new_action(
+        "disk-create.allocate-regions",
+        sdc_alloc_regions,
+        sdc_alloc_regions_undo,
+    );
+    static ref REGIONS_ACCOUNT: NexusAction = ActionFunc::new_action(
+        "disk-create.account-regions",
+        sdc_account_regions,
+        sdc_account_regions_undo,
+    );
     static ref REGIONS_ENSURE: NexusAction =
         new_action_noop_undo("disk-create.regions-ensure", sdc_regions_ensure,);
     static ref CREATE_VOLUME_RECORD: NexusAction = ActionFunc::new_action(
@@ -246,6 +254,76 @@ async fn sdc_alloc_regions(
     Ok(datasets_and_regions)
 }
 
+async fn sdc_alloc_regions_undo(
+    sagactx: NexusActionContext,
+) -> Result<(), anyhow::Error> {
+    let osagactx = sagactx.user_data();
+
+    let region_ids = sagactx
+        .lookup::<Vec<(db::model::Dataset, db::model::Region)>>(
+            "datasets_and_regions",
+        )?
+        .into_iter()
+        .map(|(_, region)| region.id())
+        .collect::<Vec<Uuid>>();
+
+    osagactx.datastore().regions_hard_delete(region_ids).await?;
+    Ok(())
+}
+
+fn get_space_used_by_allocated_regions(
+    sagactx: &NexusActionContext,
+) -> Result<i64, ActionError> {
+    let space_used = sagactx
+        .lookup::<Vec<(db::model::Dataset, db::model::Region)>>(
+            "datasets_and_regions",
+        )?
+        .into_iter()
+        .map(|(_, region)| region.size_used())
+        .fold(0, |acc, x| acc + x);
+    Ok(space_used)
+}
+
+// TODO: Not yet idempotent
+async fn sdc_account_regions(
+    sagactx: NexusActionContext,
+) -> Result<(), ActionError> {
+    let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
+
+    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
+    osagactx
+        .datastore()
+        .resource_usage_update_disk(
+            &opctx,
+            params.project_id,
+            get_space_used_by_allocated_regions(&sagactx)?,
+        )
+        .await
+        .map_err(ActionError::action_failed)?;
+    Ok(())
+}
+
+// TODO: Not yet idempotent
+async fn sdc_account_regions_undo(
+    sagactx: NexusActionContext,
+) -> Result<(), anyhow::Error> {
+    let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
+
+    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
+    osagactx
+        .datastore()
+        .resource_usage_update_disk(
+            &opctx,
+            params.project_id,
+            -get_space_used_by_allocated_regions(&sagactx)?,
+        )
+        .await
+        .map_err(ActionError::action_failed)?;
+    Ok(())
+}
+
 /// Call out to Crucible agent and perform region creation.
 async fn ensure_region_in_dataset(
     log: &Logger,
@@ -560,9 +638,14 @@ async fn sdc_create_volume_record_undo(
     sagactx: NexusActionContext,
 ) -> Result<(), anyhow::Error> {
     let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
+    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
 
     let volume_id = sagactx.lookup::<Uuid>("volume_id")?;
-    osagactx.nexus().volume_delete(volume_id).await?;
+    osagactx
+        .nexus()
+        .volume_delete(&opctx, params.project_id, volume_id)
+        .await?;
     Ok(())
 }

diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs
index 9cb8ac04b88..8bb62c1a270 100644
--- a/nexus/src/app/sagas/disk_delete.rs
+++ b/nexus/src/app/sagas/disk_delete.rs
@@ -6,6 +6,8 @@ use super::ActionRegistry;
 use super::NexusActionContext;
 use super::NexusSaga;
 use crate::app::sagas::NexusAction;
+use crate::authn;
+use crate::context::OpContext;
 use lazy_static::lazy_static;
 use serde::Deserialize;
 use serde::Serialize;
@@ -19,6 +21,8 @@ use uuid::Uuid;
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct Params {
+    pub serialized_authn: authn::saga::Serialized,
+    pub project_id: Uuid,
     pub disk_id: Uuid,
 }
 
@@ -89,10 +93,12 @@ async fn sdd_delete_volume(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
+    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
     let volume_id = sagactx.lookup::<Uuid>("volume_id")?;
     osagactx
         .nexus()
-        .volume_delete(volume_id)
+        .volume_delete(&opctx, params.project_id, volume_id)
         .await
         .map_err(ActionError::action_failed)?;
     Ok(())

diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs
index 2d11ad91e01..4c65b2d984f 100644
--- a/nexus/src/app/sagas/snapshot_create.rs
+++ b/nexus/src/app/sagas/snapshot_create.rs
@@ -584,10 +584,15 @@ async fn ssc_create_volume_record_undo(
 ) -> Result<(), anyhow::Error> {
     let log = sagactx.user_data().log();
     let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
+    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
 
     let volume_id = sagactx.lookup::<Uuid>("volume_id")?;
     info!(log, "deleting volume {}", volume_id);
-    osagactx.nexus().volume_delete(volume_id).await?;
+    osagactx
+        .nexus()
+        .volume_delete(&opctx, params.project_id, volume_id)
+        .await?;
     Ok(())
 }

diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs
index 74efcc5eb3b..9ac93a6a62c 100644
--- a/nexus/src/app/sagas/volume_delete.rs
+++ b/nexus/src/app/sagas/volume_delete.rs
@@ -28,6 +28,8 @@ use super::NexusActionContext;
 use super::NexusSaga;
 use super::MAX_CONCURRENT_REGION_REQUESTS;
 use crate::app::sagas::NexusAction;
+use crate::authn;
+use crate::context::OpContext;
 use crate::db;
 use crate::db::datastore::CrucibleResources;
 use crucible_agent_client::{types::RegionId, Client as CrucibleAgentClient};
@@ -47,6 +49,8 @@ use uuid::Uuid;
 
 #[derive(Debug, Deserialize, Serialize)]
 pub struct Params {
+    pub serialized_authn: authn::saga::Serialized,
+    pub project_id: Uuid,
     pub volume_id: Uuid,
 }
 
@@ -171,6 +175,7 @@ async fn svd_delete_crucible_regions(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
 
     let crucible_resources_to_delete =
         sagactx.lookup::<CrucibleResources>("crucible_resources_to_delete")?;
@@ -191,6 +196,23 @@ async fn svd_delete_crucible_regions(
         .map(|(_, r)| r.id())
         .collect();
 
+    // TODO: This accounting is not yet idempotent
+    let space_used = crucible_resources_to_delete
+        .datasets_and_regions
+        .iter()
+        .fold(0, |acc, (_, r)| acc + r.size_used());
+    let opctx =
+        OpContext::for_saga_action(&sagactx, &params.serialized_authn);
+    osagactx
+        .datastore()
+        .resource_usage_update_disk(
+            &opctx,
+            params.project_id,
+            -space_used,
+        )
+        .await
+        .map_err(ActionError::action_failed)?;
+
     osagactx
         .datastore()
         .regions_hard_delete(region_ids_to_delete)

diff --git a/nexus/src/app/volume.rs b/nexus/src/app/volume.rs
index e8bfe362920..714604bcb13 100644
--- a/nexus/src/app/volume.rs
+++ b/nexus/src/app/volume.rs
@@ -5,6 +5,8 @@
 //! Volumes
 
 use crate::app::sagas;
+use crate::authn;
+use crate::context::OpContext;
 use omicron_common::api::external::DeleteResult;
 use omicron_common::api::external::Error;
 use std::sync::Arc;
@@ -24,9 +26,15 @@ impl super::Nexus {
     /// and the user's query shouldn't wait on those DELETE calls.
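+    ///
+    /// The saga runs asynchronously relative to the caller, so it cannot
+    /// borrow the caller's `OpContext` directly; the authn context is
+    /// serialized into the saga `Params` instead and rebuilt inside each
+    /// action via `OpContext::for_saga_action`.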
     pub async fn volume_delete(
         self: &Arc<Self>,
+        opctx: &OpContext,
+        project_id: Uuid,
         volume_id: Uuid,
     ) -> DeleteResult {
-        let saga_params = sagas::volume_delete::Params { volume_id };
+        let saga_params = sagas::volume_delete::Params {
+            serialized_authn: authn::saga::Serialized::for_opctx(opctx),
+            project_id,
+            volume_id,
+        };
 
         // TODO execute this in the background instead, not using the usual SEC
         let saga_outputs = self

diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs
index 7d50376351a..2dd4601895f 100644
--- a/nexus/src/db/datastore/mod.rs
+++ b/nexus/src/db/datastore/mod.rs
@@ -56,6 +56,7 @@ mod project;
 mod rack;
 mod region;
 mod region_snapshot;
+mod resource_usage;
 mod role;
 mod saga;
 mod service;

diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs
index e237aa63819..d16fec24133 100644
--- a/nexus/src/db/datastore/organization.rs
+++ b/nexus/src/db/datastore/organization.rs
@@ -17,6 +17,7 @@ use crate::db::identity::Resource;
 use crate::db::model::Name;
 use crate::db::model::Organization;
 use crate::db::model::OrganizationUpdate;
+use crate::db::model::ResourceUsage;
 use crate::db::model::Silo;
 use crate::db::pagination::paginated;
 use crate::external_api::params;
@@ -51,7 +52,7 @@ impl DataStore {
         let organization = Organization::new(organization.clone(), silo_id);
         let name = organization.name().as_str().to_string();
 
-        Silo::insert_resource(
+        let org = Silo::insert_resource(
             silo_id,
             diesel::insert_into(dsl::organization).values(organization),
         )
@@ -71,7 +72,16 @@ impl DataStore {
                     ErrorHandler::Conflict(ResourceType::Organization, &name),
                 )
             }
-        })
+        })?;
+
+        // Create resource usage for the org.
+        //
+        // NOTE: if you do this before the org is created, it'll exist as
+        // soon as the org does. However, that'll work better in a saga/CTE when
+        // unwinding is built-in more naturally.
+        self.resource_usage_create(opctx, ResourceUsage::new(org.id())).await?;
+
+        Ok(org)
     }
 
     /// Delete a organization

diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs
index b4063d7ad49..caf2c073f82 100644
--- a/nexus/src/db/datastore/project.rs
+++ b/nexus/src/db/datastore/project.rs
@@ -17,6 +17,7 @@ use crate::db::model::Name;
 use crate::db::model::Organization;
 use crate::db::model::Project;
 use crate::db::model::ProjectUpdate;
+use crate::db::model::ResourceUsage;
 use crate::db::pagination::paginated;
 use async_bb8_diesel::AsyncRunQueryDsl;
 use chrono::Utc;
@@ -45,7 +46,7 @@ impl DataStore {
         let name = project.name().as_str().to_string();
         let organization_id = project.organization_id;
 
-        Organization::insert_resource(
+        let project = Organization::insert_resource(
             organization_id,
             diesel::insert_into(dsl::project).values(project),
         )
@@ -62,7 +63,17 @@ impl DataStore {
                     ErrorHandler::Conflict(ResourceType::Project, &name),
                 )
             }
-        })
+        })?;
+
+        // Create resource usage for the project.
+        //
+        // NOTE: if you do this before the project is created, it'll exist as
+        // soon as the project does. However, that'll work better in a saga/CTE when
+        // unwinding is built-in more naturally.
+        self.resource_usage_create(opctx, ResourceUsage::new(project.id()))
+            .await?;
+
+        Ok(project)
     }
 
     /// Delete a project

diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs
new file mode 100644
index 00000000000..7e4547b124f
--- /dev/null
+++ b/nexus/src/db/datastore/resource_usage.rs
@@ -0,0 +1,72 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`ResourceUsage`]s.
+
+use super::DataStore;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::model::ResourceUsage;
+use crate::db::queries::resource_usage_update::ResourceUsageUpdate;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Create a resource_usage
+    pub async fn resource_usage_create(
+        &self,
+        opctx: &OpContext,
+        resource_usage: ResourceUsage,
+    ) -> Result<(), Error> {
+        use db::schema::resource_usage::dsl;
+
+        diesel::insert_into(dsl::resource_usage)
+            .values(resource_usage)
+            .on_conflict_do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+
+    /// Delete a resource_usage
+    pub async fn resource_usage_delete(
+        &self,
+        opctx: &OpContext,
+        id: Uuid,
+    ) -> DeleteResult {
+        use db::schema::resource_usage::dsl;
+
+        diesel::delete(dsl::resource_usage)
+            .filter(dsl::id.eq(id))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+
+    pub async fn resource_usage_update_disk(
+        &self,
+        opctx: &OpContext,
+        project_id: Uuid,
+        disk_byte_diff: i64,
+    ) -> Result<(), Error> {
+        ResourceUsageUpdate::new_update_disk(project_id, disk_byte_diff)
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        Ok(())
+    }
+}

diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs
index 28bf42f12e0..03cb2f20ff4 100644
--- a/nexus/src/db/datastore/silo.rs
+++ b/nexus/src/db/datastore/silo.rs
@@ -16,6 +16,7 @@ use crate::db::error::TransactionError;
 use crate::db::fixed_data::silo::DEFAULT_SILO;
 use crate::db::identity::Resource;
 use crate::db::model::Name;
+use crate::db::model::ResourceUsage;
 use crate::db::model::Silo;
 use crate::db::pagination::paginated;
 use crate::external_api::params;
@@ -53,10 +54,17 @@ impl DataStore {
                 public_error_from_diesel_pool(e, ErrorHandler::Server)
             })?;
         info!(opctx.log, "created {} built-in silos", count);
+
+        self.resource_usage_create(
+            opctx,
+            ResourceUsage::new(DEFAULT_SILO.id()),
+        )
+        .await?;
+
         Ok(())
     }
 
-    pub async fn silo_create_query(
+    async fn silo_create_query(
         opctx: &OpContext,
         silo: Silo,
     ) -> Result<impl RunnableQuery<Silo>, Error> {
@@ -77,7 +85,7 @@ impl DataStore {
         let silo_id = Uuid::new_v4();
         let silo_group_id = Uuid::new_v4();
 
-        let silo_create_query = DataStore::silo_create_query(
+        let silo_create_query = Self::silo_create_query(
             opctx,
             db::model::Silo::new_with_id(silo_id, new_silo_params.clone()),
         )
@@ -134,6 +142,11 @@ impl DataStore {
             .await?
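+            // Creating the silo and its resource_usage row inside one
+            // transaction keeps the accounting consistent even if silo
+            // creation fails partway through.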
             .transaction_async(|conn| async move {
                 let silo = silo_create_query.get_result_async(&conn).await?;
+
+                use db::schema::resource_usage::dsl;
+                diesel::insert_into(dsl::resource_usage)
+                    .values(ResourceUsage::new(silo.id()))
+                    .execute_async(&conn)
+                    .await?;
 
                 if let Some(query) = silo_admin_group_ensure_query {
                     query.get_result_async(&conn).await?;

diff --git a/nexus/src/db/queries/mod.rs b/nexus/src/db/queries/mod.rs
index 077d542dbbb..c6a8e60efb3 100644
--- a/nexus/src/db/queries/mod.rs
+++ b/nexus/src/db/queries/mod.rs
@@ -11,5 +11,6 @@ pub mod ip_pool;
 mod next_item;
 pub mod network_interface;
 pub mod region_allocation;
+pub mod resource_usage_update;
 pub mod vpc;
 pub mod vpc_subnet;

diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs
new file mode 100644
index 00000000000..625276c8c8d
--- /dev/null
+++ b/nexus/src/db/queries/resource_usage_update.rs
@@ -0,0 +1,137 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Implementation of queries for updating resource usage info.
+
+use crate::db::alias::ExpressionAlias;
+use crate::db::pool::DbConnection;
+use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery};
+use db_macros::Subquery;
+use diesel::pg::Pg;
+use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId};
+use diesel::{
+    sql_types, BoolExpressionMethods, ExpressionMethods,
+    NullableExpressionMethods, QueryDsl, RunQueryDsl,
+};
+use nexus_db_model::queries::resource_usage_update::{parent_org, parent_silo};
+
+#[derive(Subquery, QueryId)]
+#[subquery(name = parent_org)]
+struct ParentOrg {
+    query: Box<dyn CteQuery<SqlType = parent_org::SqlType>>,
+}
+
+impl ParentOrg {
+    fn new(project_id: uuid::Uuid) -> Self {
+        use crate::db::schema::project::dsl;
+        Self {
+            query: Box::new(
+                dsl::project.filter(dsl::id.eq(project_id)).select((
+                    ExpressionAlias::new::<parent_org::dsl::id>(
+                        dsl::organization_id,
+                    ),
+                )),
+            ),
+        }
+    }
+}
+
+#[derive(Subquery, QueryId)]
+#[subquery(name = parent_silo)]
+struct ParentSilo {
+    query: Box<dyn CteQuery<SqlType = parent_silo::SqlType>>,
+}
+
+impl ParentSilo {
+    fn new(parent_org: &ParentOrg) -> Self {
+        use crate::db::schema::organization::dsl;
+        Self {
+            query: Box::new(
+                dsl::organization
+                    .filter(
+                        dsl::id.eq(parent_org
+                            .query_source()
+                            .select(parent_org::id)
+                            .single_value()
+                            .assume_not_null()),
+                    )
+                    .select((ExpressionAlias::new::<parent_silo::dsl::id>(
+                        dsl::silo_id,
+                    ),)),
+            ),
+        }
+    }
+}
+
+/// Constructs a CTE for updating resource usage information in all
+/// collections for a particular object.
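+///
+/// Roughly, the generated statement has the following shape (a sketch of
+/// the builder output, not the verbatim SQL):
+///
+/// ```sql
+/// WITH parent_org AS (SELECT organization_id AS id FROM project
+///                     WHERE project.id = <project id>),
+///      parent_silo AS (SELECT silo_id AS id FROM organization
+///                      WHERE organization.id = (SELECT id FROM parent_org))
+/// UPDATE resource_usage
+/// SET disk_bytes_used = disk_bytes_used + <diff>
+/// WHERE id = <project id>
+///    OR id = (SELECT id FROM parent_org)
+///    OR id = (SELECT id FROM parent_silo)
+/// RETURNING id
+/// ```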
+#[derive(QueryId)]
+pub struct ResourceUsageUpdate {
+    cte: Cte,
+}
+
+impl ResourceUsageUpdate {
+    pub fn new_update_disk(
+        project_id: uuid::Uuid,
+        disk_bytes_diff: i64,
+    ) -> Self {
+        let parent_org = ParentOrg::new(project_id);
+        let parent_silo = ParentSilo::new(&parent_org);
+
+        use crate::db::schema::resource_usage::dsl;
+
+        let final_update = Box::new(
+            diesel::update(dsl::resource_usage)
+                .set(
+                    dsl::disk_bytes_used
+                        .eq(dsl::disk_bytes_used + disk_bytes_diff),
+                )
+                .filter(
+                    // Update the project
+                    dsl::id
+                        .eq(project_id)
+                        // Update the organization containing the project
+                        .or(dsl::id.eq(parent_org
+                            .query_source()
+                            .select(parent_org::id)
+                            .single_value()
+                            .assume_not_null()))
+                        // Update the silo containing the organization
+                        .or(dsl::id.eq(parent_silo
+                            .query_source()
+                            .select(parent_silo::id)
+                            .single_value()
+                            .assume_not_null())), // TODO: Presumably, we could also update the fleet containing
+                    // the silo here. However, such an object does not exist in the
+                    // database at the time of writing this comment.
+                )
+                .returning(dsl::id),
+        );
+
+        let cte = CteBuilder::new()
+            .add_subquery(parent_org)
+            .add_subquery(parent_silo)
+            .build(final_update);
+
+        Self { cte }
+    }
+}
+
+impl QueryFragment<Pg> for ResourceUsageUpdate {
+    fn walk_ast<'a>(
+        &'a self,
+        mut out: AstPass<'_, 'a, Pg>,
+    ) -> diesel::QueryResult<()> {
+        out.unsafe_to_cache_prepared();
+
+        self.cte.walk_ast(out.reborrow())?;
+        Ok(())
+    }
+}
+
+impl Query for ResourceUsageUpdate {
+    type SqlType = sql_types::Uuid;
+}
+
+impl RunQueryDsl<DbConnection> for ResourceUsageUpdate {}

From d14f8fceecba87d7f5d7255ffdc7a9abab9f51a3 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Mon, 3 Oct 2022 19:36:02 -0400
Subject: [PATCH 02/80] less weird formatting

---
 nexus/src/db/queries/resource_usage_update.rs | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs
index 625276c8c8d..2f7e0dbc34b 100644
--- a/nexus/src/db/queries/resource_usage_update.rs
+++ b/nexus/src/db/queries/resource_usage_update.rs
@@ -102,9 +102,10 @@ impl ResourceUsageUpdate {
                             .query_source()
                             .select(parent_silo::id)
                             .single_value()
-                            .assume_not_null())), // TODO: Presumably, we could also update the fleet containing
-                    // the silo here. However, such an object does not exist in the
-                    // database at the time of writing this comment.
+                            .assume_not_null())),
+                    // TODO: Presumably, we could also update the fleet containing
+                    // the silo here. However, such an object does not exist in the
+                    // database at the time of writing this comment.
                 )
                 .returning(dsl::id),
         );

From 0430d1aa3dfa75eeab7f7ef63d7e3cf0f702a159 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Tue, 4 Oct 2022 12:43:02 -0400
Subject: [PATCH 03/80] Tweak CTE to avoid using 'OR'; prefer 'UNION' to avoid
 full table scans

---
 .../src/queries/resource_usage_update.rs      |  7 ++
 nexus/src/db/queries/resource_usage_update.rs | 79 ++++++++++++-------
 2 files changed, 56 insertions(+), 30 deletions(-)

diff --git a/nexus/db-model/src/queries/resource_usage_update.rs b/nexus/db-model/src/queries/resource_usage_update.rs
index c7bf9199e3b..f80e5529c8c 100644
--- a/nexus/db-model/src/queries/resource_usage_update.rs
+++ b/nexus/db-model/src/queries/resource_usage_update.rs
@@ -19,10 +19,17 @@ table! {
     }
 }
 
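+// Named source for the subquery that UNIONs the project ID with its parent
+// organization and silo IDs, so the final UPDATE can filter with `IN`
+// rather than a chain of ORs.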
+table! {
+    all_collections {
+        id -> Uuid,
+    }
+}
+
 diesel::allow_tables_to_appear_in_same_query!(organization, parent_org,);
 
 diesel::allow_tables_to_appear_in_same_query!(
     resource_usage,
     parent_org,
     parent_silo,
+    all_collections,
 );

diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs
index 2f7e0dbc34b..f2740f03138 100644
--- a/nexus/src/db/queries/resource_usage_update.rs
+++ b/nexus/src/db/queries/resource_usage_update.rs
@@ -11,10 +11,11 @@ use db_macros::Subquery;
 use diesel::pg::Pg;
 use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId};
 use diesel::{
-    sql_types, BoolExpressionMethods, ExpressionMethods,
-    NullableExpressionMethods, QueryDsl, RunQueryDsl,
+    sql_types, CombineDsl, ExpressionMethods, IntoSql, QueryDsl, RunQueryDsl,
+};
+use nexus_db_model::queries::resource_usage_update::{
+    all_collections, parent_org, parent_silo,
 };
-use nexus_db_model::queries::resource_usage_update::{parent_org, parent_silo};
 
 #[derive(Subquery, QueryId)]
 #[subquery(name = parent_org)]
@@ -49,13 +50,9 @@ impl ParentSilo {
         Self {
             query: Box::new(
                 dsl::organization
-                    .filter(
-                        dsl::id.eq(parent_org
-                            .query_source()
-                            .select(parent_org::id)
-                            .single_value()
-                            .assume_not_null()),
-                    )
+                    .filter(dsl::id.eq_any(
+                        parent_org.query_source().select(parent_org::id),
+                    ))
                     .select((ExpressionAlias::new::<parent_silo::dsl::id>(
                         dsl::silo_id,
                     ),)),
@@ -64,6 +61,42 @@ impl ParentSilo {
     }
 }
 
+#[derive(Subquery, QueryId)]
+#[subquery(name = all_collections)]
+struct AllCollections {
+    query: Box<dyn CteQuery<SqlType = all_collections::SqlType>>,
+}
+
+impl AllCollections {
+    fn new(
+        project_id: uuid::Uuid,
+        parent_org: &ParentOrg,
+        parent_silo: &ParentSilo,
+    ) -> Self {
+        Self {
+            query: Box::new(
+                diesel::select((ExpressionAlias::new::<
+                    all_collections::dsl::id,
+                >(
+                    project_id.into_sql::<sql_types::Uuid>()
+                ),))
+                .union(parent_org.query_source().select((
+                    ExpressionAlias::new::<all_collections::dsl::id>(
+                        parent_org::id,
+                    ),
+                )))
+                .union(parent_silo.query_source().select((
+                    ExpressionAlias::new::<all_collections::dsl::id>(
+                        parent_silo::id,
+                    ),
+                ))), // TODO: Presumably, we could also update the fleet containing
+                     // the silo here. However, such an object does not exist in the
+                     // database at the time of writing this comment.
+            ),
+        }
+    }
+}
+
 /// Constructs a CTE for updating resource usage information in all
 /// collections for a particular object.
 #[derive(QueryId)]
@@ -78,6 +111,8 @@ impl ResourceUsageUpdate {
     ) -> Self {
         let parent_org = ParentOrg::new(project_id);
         let parent_silo = ParentSilo::new(&parent_org);
+        let all_collections =
+            AllCollections::new(project_id, &parent_org, &parent_silo);
 
         use crate::db::schema::resource_usage::dsl;
 
@@ -87,32 +122,16 @@ impl ResourceUsageUpdate {
                     dsl::disk_bytes_used
                         .eq(dsl::disk_bytes_used + disk_bytes_diff),
                 )
-                .filter(
-                    // Update the project
-                    dsl::id
-                        .eq(project_id)
-                        // Update the organization containing the project
-                        .or(dsl::id.eq(parent_org
-                            .query_source()
-                            .select(parent_org::id)
-                            .single_value()
-                            .assume_not_null()))
-                        // Update the silo containing the organization
-                        .or(dsl::id.eq(parent_silo
-                            .query_source()
-                            .select(parent_silo::id)
-                            .single_value()
-                            .assume_not_null())),
-                    // TODO: Presumably, we could also update the fleet containing
-                    // the silo here. However, such an object does not exist in the
-                    // database at the time of writing this comment.
-                )
+                .filter(dsl::id.eq_any(
+                    all_collections.query_source().select(all_collections::id),
+                ))
                 .returning(dsl::id),
         );
 
         let cte = CteBuilder::new()
             .add_subquery(parent_org)
             .add_subquery(parent_silo)
+            .add_subquery(all_collections)
             .build(final_update);
 
         Self { cte }

From 55c1e81b11dcf3e01bf40f7a1c2c76cabec01c64 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Tue, 4 Oct 2022 19:37:46 -0400
Subject: [PATCH 04/80] Add test

---
 nexus/src/db/datastore/resource_usage.rs |  18 ++++
 nexus/tests/integration_tests/disks.rs   | 127 ++++++++++++++++++++++-
 2 files changed, 144 insertions(+), 1 deletion(-)

diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs
index 7e4547b124f..aa4129137f6 100644
--- a/nexus/src/db/datastore/resource_usage.rs
+++ b/nexus/src/db/datastore/resource_usage.rs
@@ -37,6 +37,24 @@ impl DataStore {
         Ok(())
     }
 
+    pub async fn resource_usage_get(
+        &self,
+        opctx: &OpContext,
+        id: Uuid,
+    ) -> Result<ResourceUsage, Error> {
+        use db::schema::resource_usage::dsl;
+
+        let resource_usage = dsl::resource_usage
+            .find(id)
+            .select(ResourceUsage::as_select())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        Ok(resource_usage)
+    }
+
     /// Delete a resource_usage
     pub async fn resource_usage_delete(
         &self,

diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs
index 954cd028a3c..a699e82284e 100644
--- a/nexus/tests/integration_tests/disks.rs
+++ b/nexus/tests/integration_tests/disks.rs
@@ -34,7 +34,7 @@ use omicron_common::api::external::Instance;
 use omicron_common::api::external::Name;
 use omicron_common::backoff;
 use omicron_nexus::TestInterfaces as _;
-use omicron_nexus::{external_api::params, Nexus};
+use omicron_nexus::{context::OpContext, external_api::params, Nexus};
 use oximeter::types::Datum;
 use oximeter::types::Measurement;
 use sled_agent_client::TestInterfaces as _;
@@ -43,6 +43,7 @@ use uuid::Uuid;
 
 const ORG_NAME: &str = "test-org";
 const PROJECT_NAME: &str = "springfield-squidport-disks";
+const PROJECT_NAME_2: &str = "bouncymeadow-octopusharbor-disks";
 const DISK_NAME: &str = "just-rainsticks";
 const INSTANCE_NAME: &str = "just-rainsticks";
 
@@ -897,6 +898,130 @@ async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) {
         .unwrap();
 }
 
+#[nexus_test]
+async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) {
+    let client = &cptestctx.external_client;
+    let nexus = &cptestctx.server.apictx.nexus;
+    let datastore = nexus.datastore();
+
+    let _test = DiskTest::new(&cptestctx).await;
+
+    create_ip_pool(&client, "p0", None, None).await;
+    let org_id = create_organization(&client, ORG_NAME).await.identity.id;
+    let project_id1 =
+        create_project(client, ORG_NAME, PROJECT_NAME).await.identity.id;
+    let project_id2 =
+        create_project(client, ORG_NAME, PROJECT_NAME_2).await.identity.id;
+
+    let opctx =
+        OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone());
+
+    // The project and organization should start as empty.
+    let resource_usage =
+        datastore.resource_usage_get(&opctx, project_id1).await.unwrap();
+    assert_eq!(resource_usage.disk_bytes_used, 0);
+    let resource_usage =
+        datastore.resource_usage_get(&opctx, project_id2).await.unwrap();
+    assert_eq!(resource_usage.disk_bytes_used, 0);
+    let resource_usage =
+        datastore.resource_usage_get(&opctx, org_id).await.unwrap();
+    assert_eq!(resource_usage.disk_bytes_used, 0);
+
+    // Ask for a 1 gibibyte disk in the first project.
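+    // (Each disk is backed by three replicated Crucible regions, so the
+    // physical bytes accounted below are expected to be 3x the disk size.)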
+ // + // This disk should appear in the accounting information for the project + // in which it was allocated + let disk_size = ByteCount::from_gibibytes_u32(1); + let disks_url = + format!("/organizations/{}/projects/{}/disks", ORG_NAME, PROJECT_NAME); + let disk_one = params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: "disk-one".parse().unwrap(), + description: String::from("sells rainsticks"), + }, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + size: disk_size, + }; + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &disks_url) + .body(Some(&disk_one)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected failure creating 1 GiB disk"); + let resource_usage = + datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); + assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + let resource_usage = + datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); + assert_eq!(resource_usage.disk_bytes_used, 0); + let resource_usage = + datastore.resource_usage_get(&opctx, org_id).await.unwrap(); + assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + + // Ask for a 1 gibibyte disk in the second project. + // + // Each project should be using "one disk" of real storage, but the org + // should be using both. + let disks_url = format!( + "/organizations/{}/projects/{}/disks", + ORG_NAME, PROJECT_NAME_2 + ); + let disk_one = params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: "disk-two".parse().unwrap(), + description: String::from("sells rainsticks"), + }, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + size: disk_size, + }; + NexusRequest::new( + RequestBuilder::new(client, Method::POST, &disks_url) + .body(Some(&disk_one)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected failure creating 1 GiB disk"); + let resource_usage = + datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); + assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + let resource_usage = + datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); + assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + let resource_usage = + datastore.resource_usage_get(&opctx, org_id).await.unwrap(); + assert_eq!( + resource_usage.disk_bytes_used, + 2 * 3 * disk_size.to_bytes() as i64 + ); + + // Delete the disk we just created, observe the utilization drop + // accordingly. 
+    let disk_url = format!("{}/{}", disks_url, "disk-two");
+    NexusRequest::object_delete(client, &disk_url)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("failed to delete disk");
+    let resource_usage =
+        datastore.resource_usage_get(&opctx, project_id1).await.unwrap();
+    assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64);
+    let resource_usage =
+        datastore.resource_usage_get(&opctx, project_id2).await.unwrap();
+    assert_eq!(resource_usage.disk_bytes_used, 0);
+    let resource_usage =
+        datastore.resource_usage_get(&opctx, org_id).await.unwrap();
+    assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64);
+}
+
 // Test disk size accounting
 #[nexus_test]
 async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) {

From 98467b2ee19fa33d92e6a5005ab7716823c9814f Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Tue, 4 Oct 2022 19:50:47 -0400
Subject: [PATCH 05/80] test snapshot usage accounting

---
 nexus/tests/integration_tests/snapshots.rs | 39 ++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs
index 7231d1a1c92..58d7ee46932 100644
--- a/nexus/tests/integration_tests/snapshots.rs
+++ b/nexus/tests/integration_tests/snapshots.rs
@@ -286,9 +286,11 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) {
 #[nexus_test]
 async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
     let client = &cptestctx.external_client;
+    let nexus = &cptestctx.server.apictx.nexus;
+    let datastore = nexus.datastore();
     DiskTest::new(&cptestctx).await;
     create_ip_pool(&client, "p0", None, None).await;
-    create_org_and_project(client).await;
+    let project_id = create_org_and_project(client).await;
     let disks_url = get_disks_url();
 
     // Create a blank disk
@@ -317,6 +319,11 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
         .parsed_body()
         .unwrap();
 
+    let opctx =
+        OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone());
+    let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap();
+    assert_eq!(usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64);
+
     // Issue snapshot request
     let snapshots_url = format!(
         "/organizations/{}/projects/{}/snapshots",
@@ -331,13 +338,15 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
                 name: "not-attached".parse().unwrap(),
                 description: "not attached to instance".into(),
             },
-            disk: base_disk_name,
+            disk: base_disk_name.clone(),
         },
     )
    .await;
 
     assert_eq!(snapshot.disk_id, base_disk.identity.id);
     assert_eq!(snapshot.size, base_disk.size);
+    let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap();
+    assert_eq!(usage.disk_bytes_used, 2 * 3 * disk_size.to_bytes() as i64);
 
     // Create a disk from this snapshot
     let disk_size = ByteCount::from_gibibytes_u32(2);
@@ -365,6 +374,9 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
         .parsed_body()
         .unwrap();
 
+    let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap();
+    assert_eq!(usage.disk_bytes_used, 3 * 3 * disk_size.to_bytes() as i64);
+
     // Delete snapshot
     let snapshot_url = format!(
         "/organizations/{}/projects/{}/snapshots/not-attached",
@@ -379,6 +391,29 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
         .execute()
         .await
         .unwrap();
+
+    let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap();
+    assert_eq!(usage.disk_bytes_used, 2 * 3 * disk_size.to_bytes() as i64);
+
+    // Delete the disk using the snapshot
+    let disk_url = format!("{}/{}", disks_url, snap_disk_name);
+    NexusRequest::object_delete(client, &disk_url)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("failed to delete disk");
+    let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap();
+    assert_eq!(usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64);
+
+    // Delete the original base disk
+    let disk_url = format!("{}/{}", disks_url, base_disk_name);
+    NexusRequest::object_delete(client, &disk_url)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("failed to delete disk");
+    let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap();
+    assert_eq!(usage.disk_bytes_used, 0);
 }
 
 // Test the various ways Nexus can reject a disk created from a snapshot

From f8383653a4983cf036e0fd3aae975bf2495faa67 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Tue, 4 Oct 2022 21:22:08 -0400
Subject: [PATCH 06/80] Emit disk usage info to clickhouse

---
 nexus/src/app/mod.rs                          |  6 +-
 nexus/src/context.rs                          |  1 +
 nexus/src/db/datastore/mod.rs                 | 13 ++-
 nexus/src/db/datastore/resource_usage.rs      | 93 +++++++++++++++----
 nexus/src/db/queries/resource_usage_update.rs | 10 +-
 5 files changed, 101 insertions(+), 22 deletions(-)

diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs
index 172d83b9123..098e96cf2c2 100644
--- a/nexus/src/app/mod.rs
+++ b/nexus/src/app/mod.rs
@@ -14,6 +14,7 @@ use crate::populate::populate_start;
 use crate::populate::PopulateArgs;
 use crate::populate::PopulateStatus;
 use crate::saga_interface::SagaContext;
+use ::oximeter::types::ProducerRegistry;
 use anyhow::anyhow;
 use omicron_common::api::external::Error;
 use slog::Logger;
@@ -123,12 +124,15 @@ impl Nexus {
         log: Logger,
         resolver: internal_dns_client::multiclient::Resolver,
         pool: db::Pool,
+        producer_registry: &ProducerRegistry,
         config: &config::Config,
         authz: Arc<authz::Authz>,
     ) -> Arc<Nexus> {
         let pool = Arc::new(pool);
-        let my_sec_id = db::SecId::from(config.deployment.id);
         let db_datastore = Arc::new(db::DataStore::new(Arc::clone(&pool)));
+        db_datastore.register_producers(&producer_registry);
+
+        let my_sec_id = db::SecId::from(config.deployment.id);
         let sec_store = Arc::new(db::CockroachDbSecStore::new(
             my_sec_id,
             Arc::clone(&db_datastore),

diff --git a/nexus/src/context.rs b/nexus/src/context.rs
index 6f44504e6ae..f82ed523d10 100644
--- a/nexus/src/context.rs
+++ b/nexus/src/context.rs
@@ -173,6 +173,7 @@ impl ServerContext {
             log.new(o!("component" => "nexus")),
             resolver,
             pool,
+            &producer_registry,
             config,
             Arc::clone(&authz),
         )

diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs
index e57ceb29487..80e9c3c0a6e 100644
--- a/nexus/src/db/datastore/mod.rs
+++ b/nexus/src/db/datastore/mod.rs
@@ -26,6 +26,7 @@ use crate::db::{
     self,
     error::{public_error_from_diesel_pool, ErrorHandler},
 };
+use ::oximeter::types::ProducerRegistry;
 use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager};
 use diesel::pg::Pg;
 use diesel::prelude::*;
@@ -104,6 +105,7 @@ impl<U, T> RunnableQuery<U> for T where
 
 pub struct DataStore {
     pool: Arc<Pool>,
+    resource_usage_producer: resource_usage::Producer,
 }
 
 // The majority of `DataStore`'s methods live in our submodules as a concession
 // to compilation times; changing a query only requires incremental
 // recompilation of that query's module instead of all queries on `DataStore`.
 impl DataStore {
     pub fn new(pool: Arc<Pool>) -> Self {
-        DataStore { pool }
+        DataStore {
+            pool,
+            resource_usage_producer: resource_usage::Producer::new(),
+        }
+    }
+
+    pub fn register_producers(&self, registry: &ProducerRegistry) {
+        registry
+            .register_producer(self.resource_usage_producer.clone())
+            .unwrap();
     }
 
     // TODO-security This should be deprecated in favor of pool_authorized(),

diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs
index aa4129137f6..35c3bd4bac4 100644
--- a/nexus/src/db/datastore/resource_usage.rs
+++ b/nexus/src/db/datastore/resource_usage.rs
@@ -15,26 +15,81 @@ use async_bb8_diesel::AsyncRunQueryDsl;
 use diesel::prelude::*;
 use omicron_common::api::external::DeleteResult;
 use omicron_common::api::external::Error;
+use oximeter::{types::Sample, Metric, MetricsError, Target};
+use std::sync::{Arc, Mutex};
 use uuid::Uuid;
 
+/// A collection which holds resources (such as a project, organization, or
+/// silo).
+#[derive(Debug, Clone, Target)]
+struct CollectionTarget {
+    id: Uuid,
+}
+
+#[derive(Debug, Clone, Metric)]
+struct DiskUsageMetric {
+    #[datum]
+    bytes_used: i64,
+}
+
+#[derive(Debug, Default, Clone)]
+pub struct Producer {
+    updates: Arc<Mutex<Vec<(CollectionTarget, DiskUsageMetric)>>>,
+}
+
+impl Producer {
+    pub fn new() -> Self {
+        Self { updates: Arc::new(Mutex::new(vec![])) }
+    }
+
+    fn append(&self, usages: &Vec<ResourceUsage>) {
+        let mut new_updates = usages
+            .iter()
+            .map(|usage| {
+                (
+                    CollectionTarget { id: usage.id },
+                    DiskUsageMetric { bytes_used: usage.disk_bytes_used },
+                )
+            })
+            .collect::<Vec<_>>();
+        let mut pending_updates = self.updates.lock().unwrap();
+        pending_updates.append(&mut new_updates);
+    }
+}
+
+impl oximeter::Producer for Producer {
+    fn produce(
+        &mut self,
+    ) -> Result<Box<dyn Iterator<Item = Sample> + 'static>, MetricsError> {
+        let updates =
+            std::mem::replace(&mut *self.updates.lock().unwrap(), vec![]);
+        let samples = updates
+            .into_iter()
+            .map(|(target, metric)| Sample::new(&target, &metric));
+        Ok(Box::new(samples))
+    }
+}
+
 impl DataStore {
     /// Create a resource_usage
     pub async fn resource_usage_create(
         &self,
         opctx: &OpContext,
         resource_usage: ResourceUsage,
-    ) -> Result<(), Error> {
+    ) -> Result<Vec<ResourceUsage>, Error> {
         use db::schema::resource_usage::dsl;
 
-        diesel::insert_into(dsl::resource_usage)
-            .values(resource_usage)
-            .on_conflict_do_nothing()
-            .execute_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(e, ErrorHandler::Server)
-            })?;
-        Ok(())
+        let usages: Vec<ResourceUsage> =
+            diesel::insert_into(dsl::resource_usage)
+                .values(resource_usage)
+                .on_conflict_do_nothing()
+                .get_results_async(self.pool_authorized(opctx).await?)
+                .await
+                .map_err(|e| {
+                    public_error_from_diesel_pool(e, ErrorHandler::Server)
+                })?;
+        self.resource_usage_producer.append(&usages);
+        Ok(usages)
     }
 
     pub async fn resource_usage_update_disk(
         &self,
         opctx: &OpContext,
         project_id: Uuid,
         disk_byte_diff: i64,
-    ) -> Result<(), Error> {
-        ResourceUsageUpdate::new_update_disk(project_id, disk_byte_diff)
-            .execute_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(e, ErrorHandler::Server)
-            })?;
-        Ok(())
+    ) -> Result<Vec<ResourceUsage>, Error> {
+        let usages =
+            ResourceUsageUpdate::new_update_disk(project_id, disk_byte_diff)
+                .get_results_async(self.pool_authorized(opctx).await?)
+                .await
+                .map_err(|e| {
+                    public_error_from_diesel_pool(e, ErrorHandler::Server)
+                })?;
+        self.resource_usage_producer.append(&usages);
+        Ok(usages)
     }
 }

diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs
index f2740f03138..a285162c30a 100644
--- a/nexus/src/db/queries/resource_usage_update.rs
+++ b/nexus/src/db/queries/resource_usage_update.rs
@@ -5,6 +5,7 @@
 //! Implementation of queries for updating resource usage info.
 
 use crate::db::alias::ExpressionAlias;
+use crate::db::model::ResourceUsage;
 use crate::db::pool::DbConnection;
 use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery};
 use db_macros::Subquery;
@@ -12,6 +13,7 @@ use diesel::pg::Pg;
 use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId};
 use diesel::{
     sql_types, CombineDsl, ExpressionMethods, IntoSql, QueryDsl, RunQueryDsl,
+    SelectableHelper,
 };
 use nexus_db_model::queries::resource_usage_update::{
     all_collections, parent_org, parent_silo,
@@ -125,7 +127,7 @@ impl ResourceUsageUpdate {
                 .filter(dsl::id.eq_any(
                     all_collections.query_source().select(all_collections::id),
                 ))
-                .returning(dsl::id),
+                .returning(ResourceUsage::as_returning()),
         );
 
         let cte = CteBuilder::new()
@@ -150,8 +152,12 @@ impl QueryFragment<Pg> for ResourceUsageUpdate {
     }
 }
 
+type SelectableSql<T> = <
+    <T as diesel::Selectable<Pg>>::SelectExpression as diesel::Expression
+>::SqlType;
+
 impl Query for ResourceUsageUpdate {
-    type SqlType = sql_types::Uuid;
+    type SqlType = SelectableSql<ResourceUsage>;
 }
 
 impl RunQueryDsl<DbConnection> for ResourceUsageUpdate {}

From f1e4293b9cf30d8b02e4ed43282995ef1676dfff Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 5 Oct 2022 01:29:38 -0400
Subject: [PATCH 07/80] Add external API endpoint for querying metrics
 endpoints

---
 Cargo.lock                                 |  1 +
 nexus/src/external_api/http_entrypoints.rs | 37 ++++++++++++++++++++
 nexus/test-utils/src/lib.rs                |  1 +
 nexus/tests/integration_tests/disks.rs     | 39 +++++++++++++++++++++-
 nexus/types/Cargo.toml                     |  1 +
 nexus/types/src/external_api/params.rs     | 10 ++++++
 6 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/Cargo.lock b/Cargo.lock
index adae29b30ca..87b2d6d1026 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2710,6 +2710,7 @@ dependencies = [
  "api_identity",
  "base64",
  "chrono",
+ "dropshot",
  "omicron-common",
  "openssl",
  "openssl-probe",

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 85837f11d5c..71ddf41a721 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -241,6 +241,7 @@ pub fn external_api() -> NexusApiDescription {
         api.register(system_image_view_by_id)?;
         api.register(system_image_delete)?;
 
+        api.register(system_metrics_list)?;
         api.register(updates_refresh)?;
 
         api.register(user_list)?;
@@ -4023,6 +4024,42 @@ async fn updates_refresh(
     apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
 }
 
+// Metrics
+
+/// Access metrics data
+#[endpoint {
+    method = GET,
+    path = "/system/metrics/resource-utilization",
+    tags = ["system"],
+}]
+async fn system_metrics_list(
+    rqctx: Arc<RequestContext<Arc<ServerContext>>>,
+    query_params: Query<params::ResourceUtilization>,
+) -> Result<HttpResponseOk<ResultsPage<Measurement>>, HttpError> {
+    let apictx = rqctx.context();
+    let nexus = &apictx.nexus;
+
+    let query = query_params.into_inner();
+    let limit = rqctx.page_limit(&query.pagination)?;
+
+    let handler = async {
+        let opctx = OpContext::for_external_api(&rqctx).await?;
+        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
+
+        let result = nexus
+            .select_timeseries(
+                "collection_target:disk_usage_metric",
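+                // The timeseries name pairs the `CollectionTarget` target
+                // with the `DiskUsageMetric` metric defined alongside the
+                // producer in the datastore.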
+                &[&format!("id=={}", query.id)],
+                query.pagination,
+                limit,
+            )
+            .await?;
+
+        Ok(HttpResponseOk(result))
+    };
+    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+}
+
 // Sagas
 
 /// List sagas

diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs
index 82b01c3a0ff..148c375dfdf 100644
--- a/nexus/test-utils/src/lib.rs
+++ b/nexus/test-utils/src/lib.rs
@@ -162,6 +162,7 @@ pub async fn test_setup_with_config(
     .await
     .unwrap();
     register_test_producer(&producer).unwrap();
+    server.register_as_producer().await;
 
     ControlPlaneTestContext {
         server,

diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs
index a699e82284e..fbf0f39a5fb 100644
--- a/nexus/tests/integration_tests/disks.rs
+++ b/nexus/tests/integration_tests/disks.rs
@@ -1297,7 +1297,7 @@ async fn query_for_metrics_until_they_exist(
 async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) {
     let client = &cptestctx.external_client;
     DiskTest::new(&cptestctx).await;
-    create_org_and_project(client).await;
+    let project_id = create_org_and_project(client).await;
     create_disk(&client, ORG_NAME, PROJECT_NAME, DISK_NAME).await;
 
     // Whenever we grab this URL, get the surrounding few seconds of metrics.
@@ -1335,6 +1335,43 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) {
             assert!(cumulative.start_time() <= item.timestamp());
         }
     }
+
+    // Check the utilization info for the whole project too.
+    let utilization_url = |id: Uuid| {
+        format!(
+            "/system/metrics/resource-utilization?start_time={:?}&end_time={:?}&id={:?}",
+            Utc::now() - chrono::Duration::seconds(2),
+            Utc::now() + chrono::Duration::seconds(2),
+            id,
+        )
+    };
+
+    let get_i64 = |measurement: &oximeter::types::Measurement| -> i64 {
+        match measurement.datum() {
+            oximeter::types::Datum::I64(value) => *value,
+            _ => panic!("Unexpected datum type: {:?}", measurement.datum()),
+        }
+    };
+
+    // We should see two measurements: One when the project was created, and
+    // another once the disk modified the size.
+    let measurements = query_for_metrics_until_they_exist(
+        client,
+        &utilization_url(project_id),
+    )
+    .await;
+    assert_eq!(
+        measurements.items.len(),
+        2,
+        "Unexpected items: {:#?}",
+        measurements.items
+    );
+    assert_eq!(get_i64(&measurements.items[0]), 0);
+    assert!(
+        get_i64(&measurements.items[1]) > 0,
+        "Unexpected items: {:#?}",
+        measurements.items
+    );
 }

diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml
index 1be5c6a1ed0..3c5415029fb 100644
--- a/nexus/types/Cargo.toml
+++ b/nexus/types/Cargo.toml
@@ -8,6 +8,7 @@ license = "MPL-2.0"
 anyhow = "1.0"
 chrono = { version = "0.4", features = ["serde"] }
 base64 = "0.13.0"
+dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] }
 # must match samael's crate!
 openssl = "0.10"
 openssl-sys = "0.9"

diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs
index b5045da5d93..93b08980ee5 100644
--- a/nexus/types/src/external_api/params.rs
+++ b/nexus/types/src/external_api/params.rs
@@ -854,3 +854,13 @@ pub struct ResourceMetrics {
     /// An exclusive end time of metrics.
     pub end_time: DateTime<Utc>,
 }
+
+#[derive(Debug, Deserialize, JsonSchema)]
+pub struct ResourceUtilization {
+    #[serde(flatten)]
+    pub pagination:
+        dropshot::PaginationParams<ResourceMetrics, ResourceMetrics>,
+
+    /// The UUID of the container being queried
+    pub id: Uuid,
+}

From fda4c3c9075c4ee4dffe68fd3fc3c05af726229f Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 5 Oct 2022 01:53:40 -0400
Subject: [PATCH 08/80] fix json spec, minor tweaks

---
 nexus/src/external_api/http_entrypoints.rs |  4 +-
 nexus/test-utils/src/lib.rs                |  1 -
 nexus/tests/integration_tests/disks.rs     |  4 ++
 nexus/tests/output/nexus_tags.txt          |  1 +
 openapi/nexus.json                         | 83 ++++++++++++++++++++++
 5 files changed, 90 insertions(+), 3 deletions(-)

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 71ddf41a721..e41bc66a3d4 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -241,7 +241,7 @@ pub fn external_api() -> NexusApiDescription {
         api.register(system_image_view_by_id)?;
         api.register(system_image_delete)?;
 
-        api.register(system_metrics_list)?;
+        api.register(system_metrics_utilization_list)?;
         api.register(updates_refresh)?;
 
         api.register(user_list)?;
@@ -4032,7 +4032,7 @@ async fn updates_refresh(
     path = "/system/metrics/resource-utilization",
     tags = ["system"],
 }]
-async fn system_metrics_list(
+async fn system_metrics_utilization_list(
     rqctx: Arc<RequestContext<Arc<ServerContext>>>,
     query_params: Query<params::ResourceUtilization>,
 ) -> Result<HttpResponseOk<ResultsPage<Measurement>>, HttpError> {

diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs
index 148c375dfdf..82b01c3a0ff 100644
--- a/nexus/test-utils/src/lib.rs
+++ b/nexus/test-utils/src/lib.rs
@@ -162,7 +162,6 @@ pub async fn test_setup_with_config(
     .await
     .unwrap();
     register_test_producer(&producer).unwrap();
-    server.register_as_producer().await;
 
     ControlPlaneTestContext {
         server,

diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs
index fbf0f39a5fb..1c9d2ae926c 100644
--- a/nexus/tests/integration_tests/disks.rs
+++ b/nexus/tests/integration_tests/disks.rs
@@ -1295,6 +1295,10 @@ async fn query_for_metrics_until_they_exist(
 
 #[nexus_test]
 async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) {
+    // Normally, Nexus is not registered as a producer for tests.
+    // Turn this bit on so we can also test some metrics from Nexus itself.
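+    // (Registration points oximeter at Nexus's ProducerRegistry, which now
+    // includes the resource-usage producer from the datastore.)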
+    cptestctx.server.register_as_producer().await;
+
     let client = &cptestctx.external_client;
     DiskTest::new(&cptestctx).await;
     let project_id = create_org_and_project(client).await;

diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt
index 3fa987fbadf..182b780ab15 100644
--- a/nexus/tests/output/nexus_tags.txt
+++ b/nexus/tests/output/nexus_tags.txt
@@ -145,6 +145,7 @@ system_image_delete                    /system/images/{image_name}
 system_image_list                       /system/images
 system_image_view                       /system/images/{image_name}
 system_image_view_by_id                 /system/by-id/images/{id}
+system_metrics_utilization_list         /system/metrics/resource-utilization
 system_user_list                        /system/user
 system_user_view                        /system/user/{user_name}
 updates_refresh                         /system/updates/refresh

diff --git a/openapi/nexus.json b/openapi/nexus.json
index 8bad5a1b690..6f8530c0ca4 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -6367,6 +6367,89 @@
         }
       }
     },
+    "/system/metrics/resource-utilization": {
+      "get": {
+        "tags": [
+          "system"
+        ],
+        "summary": "Access metrics data",
+        "operationId": "system_metrics_utilization_list",
+        "parameters": [
+          {
+            "in": "query",
+            "name": "end_time",
+            "description": "An exclusive end time of metrics.",
+            "schema": {
+              "type": "string",
+              "format": "date-time"
+            },
+            "style": "form"
+          },
+          {
+            "in": "query",
+            "name": "id",
+            "description": "The UUID of the container being queried",
+            "required": true,
+            "schema": {
+              "type": "string",
+              "format": "uuid"
+            },
+            "style": "form"
+          },
+          {
+            "in": "query",
+            "name": "limit",
+            "description": "Maximum number of items returned by a single call",
+            "schema": {
+              "nullable": true,
+              "type": "integer",
+              "format": "uint32",
+              "minimum": 1
+            },
+            "style": "form"
+          },
+          {
+            "in": "query",
+            "name": "page_token",
+            "description": "Token returned by previous call to retrieve the subsequent page",
+            "schema": {
+              "nullable": true,
+              "type": "string"
+            },
+            "style": "form"
+          },
+          {
+            "in": "query",
+            "name": "start_time",
+            "description": "An inclusive start time of metrics.",
+            "schema": {
+              "type": "string",
+              "format": "date-time"
+            },
+            "style": "form"
+          }
+        ],
+        "responses": {
+          "200": {
+            "description": "successful operation",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/MeasurementResultsPage"
+                }
+              }
+            }
+          },
+          "4XX": {
+            "$ref": "#/components/responses/Error"
+          },
+          "5XX": {
+            "$ref": "#/components/responses/Error"
+          }
+        },
+        "x-dropshot-pagination": true
+      }
+    },
     "/system/policy": {
       "get": {
         "tags": [

From b20fefd15401cab994a305cb8e8be7912cd8f076 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 5 Oct 2022 14:04:27 -0400
Subject: [PATCH 09/80] Fix unauth tests

---
 Cargo.lock                                 |  1 -
 nexus/src/external_api/http_entrypoints.rs | 14 +++++++++++++-
 nexus/tests/integration_tests/endpoints.rs | 19 +++++++++++++++++++
 .../unauthorized_coverage.rs               |  5 +++--
 nexus/types/Cargo.toml                     |  1 -
 nexus/types/src/external_api/params.rs     | 10 ----------
 6 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 87b2d6d1026..adae29b30ca 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2710,7 +2710,6 @@ dependencies = [
  "api_identity",
  "base64",
  "chrono",
- "dropshot",
 "omicron-common",
  "openssl",
  "openssl-probe",

diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index e41bc66a3d4..c21e5a1a4aa 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -4026,6 +4026,18 @@ async fn updates_refresh(
Metrics +#[derive(Debug, Deserialize, JsonSchema)] +pub struct ResourceUtilization { + #[serde(flatten)] + pub pagination: dropshot::PaginationParams< + params::ResourceMetrics, + params::ResourceMetrics, + >, + + /// The UUID of the container being queried + pub id: Uuid, +} + /// Access metrics data #[endpoint { method = GET, @@ -4034,7 +4046,7 @@ async fn updates_refresh( }] async fn system_metrics_utilization_list( rqctx: Arc>>, - query_params: Query, + query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index b1aebe01599..ccf5e5a3ece 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -386,6 +386,14 @@ lazy_static! { group_attribute_name: None, }; + + pub static ref DEMO_METRICS_UTILIZATION_URL: String = + format!( + "/system/metrics/resource-utilization?start_time={:?}&end_time={:?}&id={}", + Utc::now(), + Utc::now(), + "3aaf22ae-5691-4f6d-b62c-aa532512fa78", + ); } /// Describes an API endpoint to be verified by the "unauthorized" test @@ -1384,6 +1392,17 @@ lazy_static! { )], }, + /* Metrics */ + + VerifyEndpoint { + url: &*DEMO_METRICS_UTILIZATION_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + ], + }, + /* Global Images */ VerifyEndpoint { diff --git a/nexus/tests/integration_tests/unauthorized_coverage.rs b/nexus/tests/integration_tests/unauthorized_coverage.rs index bbaebe6f79d..2532b4f0ec9 100644 --- a/nexus/tests/integration_tests/unauthorized_coverage.rs +++ b/nexus/tests/integration_tests/unauthorized_coverage.rs @@ -85,8 +85,9 @@ fn test_unauthorized_coverage() { // a VerifyEndpoint for it. let method_string = m.http_method().to_string().to_uppercase(); let found = spec_operations.iter().find(|(op, regex)| { - op.method.to_uppercase() == method_string - && regex.is_match(v.url) + // Strip query parameters, if they exist. + let url = v.url.split('?').next().unwrap(); + op.method.to_uppercase() == method_string && regex.is_match(url) }); if let Some((op, _)) = found { println!( diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 3c5415029fb..1be5c6a1ed0 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -8,7 +8,6 @@ license = "MPL-2.0" anyhow = "1.0" chrono = { version = "0.4", features = ["serde"] } base64 = "0.13.0" -dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } # must match samael's crate! openssl = "0.10" openssl-sys = "0.9" diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 93b08980ee5..b5045da5d93 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -854,13 +854,3 @@ pub struct ResourceMetrics { /// An exclusive end time of metrics. 
pub end_time: DateTime, } - -#[derive(Debug, Deserialize, JsonSchema)] -pub struct ResourceUtilization { - #[serde(flatten)] - pub pagination: - dropshot::PaginationParams, - - /// The UUID of the container being queried - pub id: Uuid, -} From 5ff619e31001720c6dfed48d0106fddbccfbd52a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 5 Oct 2022 14:19:22 -0400 Subject: [PATCH 10/80] Boost timeout to not miss utilization --- nexus/tests/integration_tests/disks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 1c9d2ae926c..5a2095b5728 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -1344,8 +1344,8 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { let utilization_url = |id: Uuid| { format!( "/system/metrics/resource-utilization?start_time={:?}&end_time={:?}&id={:?}", - Utc::now() - chrono::Duration::seconds(2), - Utc::now() + chrono::Duration::seconds(2), + Utc::now() - chrono::Duration::seconds(20), + Utc::now() + chrono::Duration::seconds(20), id, ) }; From 5c33dba478668f1091d0435b749f739d64a916ea Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 6 Oct 2022 17:10:26 -0400 Subject: [PATCH 11/80] Add CPUs provisioned --- common/src/sql/dbinit.sql | 7 +- nexus/db-model/src/resource_usage.rs | 5 +- nexus/db-model/src/schema.rs | 3 +- nexus/src/app/instance.rs | 9 ++- nexus/src/app/sagas/instance_create.rs | 45 +++++++++++ nexus/src/db/datastore/resource_usage.rs | 78 ++++++++++++++----- nexus/src/db/queries/resource_usage_update.rs | 45 ++++++++--- nexus/src/external_api/http_entrypoints.rs | 63 ++++++++++++--- nexus/src/external_api/tag-config.json | 2 +- nexus/tests/integration_tests/disks.rs | 44 +++++++---- nexus/tests/integration_tests/endpoints.rs | 6 +- nexus/tests/integration_tests/snapshots.rs | 27 +++++-- nexus/tests/output/nexus_tags.txt | 2 +- openapi/nexus.json | 24 +++++- 14 files changed, 290 insertions(+), 70 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index ecfe6ea933f..a66731bb88c 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -140,7 +140,12 @@ CREATE TABLE omicron.public.resource_usage ( /* Should match the UUID of the corresponding collection */ id UUID PRIMARY KEY, - disk_bytes_used INT8 NOT NULL + -- The amount of physical disk space which has been provisioned + -- on behalf of the collection. + physical_disk_bytes_provisioned INT8 NOT NULL, + + -- The number of CPUs provisioned by VMs. + cpus_provisioned INT8 NOT NULL ); /* diff --git a/nexus/db-model/src/resource_usage.rs b/nexus/db-model/src/resource_usage.rs index a219db8fd2d..9fe781580e8 100644 --- a/nexus/db-model/src/resource_usage.rs +++ b/nexus/db-model/src/resource_usage.rs @@ -11,11 +11,12 @@ use uuid::Uuid; pub struct ResourceUsage { pub id: Uuid, - pub disk_bytes_used: i64, + pub physical_disk_bytes_provisioned: i64, + pub cpus_provisioned: i64, } impl ResourceUsage { pub fn new(id: Uuid) -> Self { - Self { id, disk_bytes_used: 0 } + Self { id, physical_disk_bytes_provisioned: 0, cpus_provisioned: 0 } } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index ffce46a9175..9abfa3b7cf7 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -400,7 +400,8 @@ table! { table! 
{ resource_usage { id -> Uuid, - disk_bytes_used -> Int8, + physical_disk_bytes_provisioned -> Int8, + cpus_provisioned -> Int8, } } diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index ed2417d8a3a..f49edc12b6c 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -239,7 +239,7 @@ impl super::Nexus { // TODO-robustness We need to figure out what to do with Destroyed // instances? Presumably we need to clean them up at some point, but // not right away so that callers can see that they've been destroyed. - let (.., authz_instance, _) = + let (.., project, authz_instance, instance) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -250,6 +250,13 @@ impl super::Nexus { self.db_datastore .project_delete_instance(opctx, &authz_instance) .await?; + self.db_datastore + .resource_usage_update_cpus( + &opctx, + project.id(), + -i64::from(instance.runtime_state.ncpus.0 .0), + ) + .await?; self.db_datastore .instance_delete_all_network_interfaces(opctx, &authz_instance) .await?; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 7f87c5e1e2c..86faa5753b7 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -79,6 +79,11 @@ lazy_static! { "instance-create.alloc-server", sic_alloc_server ); + static ref RESOURCES_ACCOUNT: NexusAction = ActionFunc::new_action( + "instance.account-resources", + sic_account_resources, + sic_account_resources_undo, + ); static ref ALLOC_PROPOLIS_IP: NexusAction = new_action_noop_undo( "instance-create.allocate-propolis-ip", sic_allocate_propolis_ip, @@ -840,6 +845,46 @@ pub(super) async fn allocate_sled_ipv6( .map_err(ActionError::action_failed) } +// TODO: Not yet idempotent +async fn sic_account_resources( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + osagactx + .datastore() + .resource_usage_update_cpus( + &opctx, + params.project_id, + i64::from(params.create_params.ncpus.0), + ) + .await + .map_err(ActionError::action_failed)?; + Ok(()) +} + +// TODO: Not yet idempotent +async fn sic_account_resources_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + osagactx + .datastore() + .resource_usage_update_cpus( + &opctx, + params.project_id, + -i64::from(params.create_params.ncpus.0), + ) + .await + .map_err(ActionError::action_failed)?; + Ok(()) +} + // Allocate an IP address on the destination sled for the Propolis server async fn sic_allocate_propolis_ip( sagactx: NexusActionContext, diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index 35c3bd4bac4..f7b745fa6ca 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -27,33 +27,60 @@ struct CollectionTarget { } #[derive(Debug, Clone, Metric)] -struct DiskUsageMetric { +struct PhysicalDiskSpaceProvisioned { #[datum] bytes_used: i64, } +#[derive(Debug, Clone, Metric)] +struct CpusProvisioned { + #[datum] + cpus: i64, +} + #[derive(Debug, Default, Clone)] pub struct Producer { - updates: Arc>>, + samples: Arc>>, } impl Producer { pub fn new() -> Self { - Self { updates: 
Arc::new(Mutex::new(vec![])) } + Self { samples: Arc::new(Mutex::new(vec![])) } + } + + fn append_disk_metrics(&self, usages: &Vec) { + let new_samples = usages + .iter() + .map(|usage| { + Sample::new( + &CollectionTarget { id: usage.id }, + &PhysicalDiskSpaceProvisioned { + bytes_used: usage.physical_disk_bytes_provisioned, + }, + ) + }) + .collect::>(); + + self.append(new_samples); } - fn append(&self, usages: &Vec) { - let mut new_updates = usages + fn append_cpu_metrics(&self, usages: &Vec) { + let new_samples = usages .iter() .map(|usage| { - ( - CollectionTarget { id: usage.id }, - DiskUsageMetric { bytes_used: usage.disk_bytes_used }, + Sample::new( + &CollectionTarget { id: usage.id }, + &CpusProvisioned { cpus: usage.cpus_provisioned }, ) }) .collect::>(); - let mut pending_updates = self.updates.lock().unwrap(); - pending_updates.append(&mut new_updates); + + self.append(new_samples); + } + + fn append(&self, mut new_samples: Vec) { + let mut pending_samples = self.samples.lock().unwrap(); + pending_samples.append(&mut new_samples); } } @@ -61,12 +88,9 @@ impl oximeter::Producer for Producer { fn produce( &mut self, ) -> Result + 'static>, MetricsError> { - let updates = - std::mem::replace(&mut *self.updates.lock().unwrap(), vec![]); - let samples = updates - .into_iter() - .map(|(target, metric)| Sample::new(&target, &metric)); - Ok(Box::new(samples)) + let samples = + std::mem::replace(&mut *self.samples.lock().unwrap(), vec![]); + Ok(Box::new(samples.into_iter())) } } @@ -88,7 +112,8 @@ impl DataStore { .map_err(|e| { public_error_from_diesel_pool(e, ErrorHandler::Server) })?; - self.resource_usage_producer.append(&usages); + self.resource_usage_producer.append_disk_metrics(&usages); + self.resource_usage_producer.append_cpu_metrics(&usages); Ok(usages) } @@ -141,7 +166,24 @@ impl DataStore { .map_err(|e| { public_error_from_diesel_pool(e, ErrorHandler::Server) })?; - self.resource_usage_producer.append(&usages); + self.resource_usage_producer.append_disk_metrics(&usages); + Ok(usages) + } + + pub async fn resource_usage_update_cpus( + &self, + opctx: &OpContext, + project_id: Uuid, + cpus_diff: i64, + ) -> Result, Error> { + let usages = + ResourceUsageUpdate::new_update_cpus(project_id, cpus_diff) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool(e, ErrorHandler::Server) + })?; + self.resource_usage_producer.append_cpu_metrics(&usages); Ok(usages) } } diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs index a285162c30a..84b3b2f2b52 100644 --- a/nexus/src/db/queries/resource_usage_update.rs +++ b/nexus/src/db/queries/resource_usage_update.rs @@ -7,6 +7,7 @@ use crate::db::alias::ExpressionAlias; use crate::db::model::ResourceUsage; use crate::db::pool::DbConnection; +use crate::db::schema::resource_usage; use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery}; use db_macros::Subquery; use diesel::pg::Pg; @@ -107,23 +108,29 @@ pub struct ResourceUsageUpdate { } impl ResourceUsageUpdate { - pub fn new_update_disk( - project_id: uuid::Uuid, - disk_bytes_diff: i64, - ) -> Self { + // Generic utility for updating all collections including this resource, + // even transitively. 
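+    //
+    // A rough sketch of the generated statement (not the literal SQL that
+    // diesel emits; the parent subqueries are assumed to select the
+    // project's organization id and that organization's silo id):
+    //
+    //   WITH parent_org AS
+    //            (SELECT organization_id AS id FROM project WHERE id = $1),
+    //        parent_silo AS
+    //            (SELECT silo_id AS id FROM organization
+    //             WHERE id IN (SELECT id FROM parent_org)),
+    //        all_collections AS
+    //            (SELECT $1 AS id
+    //             UNION SELECT id FROM parent_org
+    //             UNION SELECT id FROM parent_silo)
+    //   UPDATE resource_usage
+    //   SET cpus_provisioned = cpus_provisioned + $2
+    //   WHERE id IN (SELECT id FROM all_collections)
+    //   RETURNING *;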
+ // + // Includes: + // - Project + // - Organization + // - Silo + fn apply_update(project_id: uuid::Uuid, values: V) -> Self + where + V: diesel::AsChangeset, + ::Changeset: + QueryFragment + Send + 'static, + { let parent_org = ParentOrg::new(project_id); let parent_silo = ParentSilo::new(&parent_org); let all_collections = AllCollections::new(project_id, &parent_org, &parent_silo); - use crate::db::schema::resource_usage::dsl; + use resource_usage::dsl; let final_update = Box::new( diesel::update(dsl::resource_usage) - .set( - dsl::disk_bytes_used - .eq(dsl::disk_bytes_used + disk_bytes_diff), - ) + .set(values) .filter(dsl::id.eq_any( all_collections.query_source().select(all_collections::id), )) @@ -138,6 +145,26 @@ impl ResourceUsageUpdate { Self { cte } } + + pub fn new_update_disk( + project_id: uuid::Uuid, + disk_bytes_diff: i64, + ) -> Self { + use resource_usage::dsl; + Self::apply_update( + project_id, + dsl::physical_disk_bytes_provisioned + .eq(dsl::physical_disk_bytes_provisioned + disk_bytes_diff), + ) + } + + pub fn new_update_cpus(project_id: uuid::Uuid, cpus_diff: i64) -> Self { + use resource_usage::dsl; + Self::apply_update( + project_id, + dsl::cpus_provisioned.eq(dsl::cpus_provisioned + cpus_diff), + ) + } } impl QueryFragment for ResourceUsageUpdate { diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index a66b83c3ade..26bd16f2df4 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -241,7 +241,7 @@ pub fn external_api() -> NexusApiDescription { api.register(system_image_view_by_id)?; api.register(system_image_delete)?; - api.register(system_metrics_utilization_list)?; + api.register(system_metrics_list)?; api.register(updates_refresh)?; api.register(user_list)?; @@ -4035,37 +4035,76 @@ pub struct ResourceUtilization { >, /// The UUID of the container being queried + // TODO: I might want to force the caller to specify type here? pub id: Uuid, } +#[derive(Display, Deserialize, JsonSchema)] +#[display(style = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum ResourceName { + PhysicalDiskSpaceProvisioned, + PhysicalDiskSpaceCapacity, + CpusProvisioned, + CpuCapacity, + RamProvisioned, + RamCapacity, +} + +#[derive(Deserialize, JsonSchema)] +struct SystemMetricsPathParam { + resource_name: ResourceName, +} + /// Access metrics data #[endpoint { method = GET, - path = "/system/metrics/resource-utilization", + path = "/system/metrics/{resource_name}", tags = ["system"], }] -async fn system_metrics_utilization_list( +async fn system_metrics_list( rqctx: Arc>>, + path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; + let resource_name = path_params.into_inner().resource_name; let query = query_params.into_inner(); let limit = rqctx.page_limit(&query.pagination)?; let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - let result = nexus - .select_timeseries( - "collection_target:disk_usage_metric", - &[&format!("id=={}", query.id)], - query.pagination, - limit, - ) - .await?; + let result = match resource_name { + ResourceName::PhysicalDiskSpaceProvisioned => { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + nexus + .select_timeseries( + "collection_target:physical_disk_space_provisioned", + &[&format!("id=={}", query.id)], + query.pagination, + limit, + ) + .await? 
+ } + ResourceName::PhysicalDiskSpaceCapacity => todo!(), + ResourceName::CpusProvisioned => { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + nexus + .select_timeseries( + "collection_target:cpus_provisioned", + &[&format!("id=={}", query.id)], + query.pagination, + limit, + ) + .await? + } + ResourceName::CpuCapacity => todo!(), + ResourceName::RamProvisioned => todo!(), + ResourceName::RamCapacity => todo!(), + }; Ok(HttpResponseOk(result)) }; diff --git a/nexus/src/external_api/tag-config.json b/nexus/src/external_api/tag-config.json index 960fe81c8f8..ce3a069132a 100644 --- a/nexus/src/external_api/tag-config.json +++ b/nexus/src/external_api/tag-config.json @@ -147,4 +147,4 @@ } } } -} \ No newline at end of file +} diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 5a2095b5728..96cb1e77b9d 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -919,13 +919,13 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { // The project and organization should start as empty. let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 0); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 0); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 0); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); // Ask for a 1 gibibyte disk in the first project. // @@ -955,13 +955,19 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { .expect("unexpected failure creating 1 GiB disk"); let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 0); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); // Ask for a 1 gibibyte disk in the second project. 
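+    // (Expected accounting below, assuming three replicated Crucible
+    // regions back each disk: 1 GiB logical => 3 * 2^30 = 3221225472
+    // bytes provisioned.)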
// @@ -992,14 +998,20 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { .expect("unexpected failure creating 1 GiB disk"); let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); assert_eq!( - resource_usage.disk_bytes_used, + resource_usage.physical_disk_bytes_provisioned, 2 * 3 * disk_size.to_bytes() as i64 ); @@ -1013,13 +1025,19 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { .expect("failed to delete disk"); let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 0); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); - assert_eq!(resource_usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); } // Test disk size accounting @@ -1343,7 +1361,7 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { // Check the utilization info for the whole project too. let utilization_url = |id: Uuid| { format!( - "/system/metrics/resource-utilization?start_time={:?}&end_time={:?}&id={:?}", + "/system/metrics/physical_disk_space_provisioned?start_time={:?}&end_time={:?}&id={:?}", Utc::now() - chrono::Duration::seconds(20), Utc::now() + chrono::Duration::seconds(20), id, diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index d4ea66e46aa..e1c1fb9285f 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -387,9 +387,9 @@ lazy_static! { group_attribute_name: None, }; - pub static ref DEMO_METRICS_UTILIZATION_URL: String = + pub static ref DEMO_SYSTEM_METRICS_URL: String = format!( - "/system/metrics/resource-utilization?start_time={:?}&end_time={:?}&id={}", + "/system/metrics/physical_disk_space_provisioned?start_time={:?}&end_time={:?}&id={}", Utc::now(), Utc::now(), "3aaf22ae-5691-4f6d-b62c-aa532512fa78", @@ -1395,7 +1395,7 @@ lazy_static! 
{ /* Metrics */ VerifyEndpoint { - url: &*DEMO_METRICS_UTILIZATION_URL, + url: &*DEMO_SYSTEM_METRICS_URL, visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![ diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 58d7ee46932..d77d5be406d 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -322,7 +322,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); // Issue snapshot request let snapshots_url = format!( @@ -346,7 +349,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { assert_eq!(snapshot.disk_id, base_disk.identity.id); assert_eq!(snapshot.size, base_disk.size); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.disk_bytes_used, 2 * 3 * disk_size.to_bytes() as i64); + assert_eq!( + usage.physical_disk_bytes_provisioned, + 2 * 3 * disk_size.to_bytes() as i64 + ); // Create a disk from this snapshot let disk_size = ByteCount::from_gibibytes_u32(2); @@ -375,7 +381,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .unwrap(); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.disk_bytes_used, 3 * 3 * disk_size.to_bytes() as i64); + assert_eq!( + usage.physical_disk_bytes_provisioned, + 3 * 3 * disk_size.to_bytes() as i64 + ); // Delete snapshot let snapshot_url = format!( @@ -393,7 +402,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .unwrap(); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.disk_bytes_used, 2 * 3 * disk_size.to_bytes() as i64); + assert_eq!( + usage.physical_disk_bytes_provisioned, + 2 * 3 * disk_size.to_bytes() as i64 + ); // Delete the disk using the snapshot let disk_url = format!("{}/{}", disks_url, snap_disk_name); @@ -403,7 +415,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to delete disk"); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.disk_bytes_used, 3 * disk_size.to_bytes() as i64); + assert_eq!( + usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); // Delete the original base disk let disk_url = format!("{}/{}", disks_url, base_disk_name); @@ -413,7 +428,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to delete disk"); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.disk_bytes_used, 0); + assert_eq!(usage.physical_disk_bytes_provisioned, 0); } // Test the various ways Nexus can reject a disk created from a snapshot diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 3b91eb004cd..0ebcbe3baab 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -145,7 +145,7 @@ system_image_delete /system/images/{image_name} system_image_list /system/images system_image_view /system/images/{image_name} system_image_view_by_id /system/by-id/images/{id} 
-system_metrics_utilization_list /system/metrics/resource-utilization +system_metrics_list /system/metrics/{resource_name} system_user_list /system/user system_user_view /system/user/{user_name} updates_refresh /system/updates/refresh diff --git a/openapi/nexus.json b/openapi/nexus.json index ba6f312ccce..58d62ec2a87 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -6357,14 +6357,23 @@ } } }, - "/system/metrics/resource-utilization": { + "/system/metrics/{resource_name}": { "get": { "tags": [ "system" ], "summary": "Access metrics data", - "operationId": "system_metrics_utilization_list", + "operationId": "system_metrics_list", "parameters": [ + { + "in": "path", + "name": "resource_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/ResourceName" + }, + "style": "simple" + }, { "in": "query", "name": "end_time", @@ -12277,6 +12286,17 @@ ] } ] + }, + "ResourceName": { + "type": "string", + "enum": [ + "physical_disk_space_provisioned", + "physical_disk_space_capacity", + "cpus_provisioned", + "cpu_capacity", + "ram_provisioned", + "ram_capacity" + ] } } }, From 4c484677d03430bbfa2f3e9332c7aa12de1b5213 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 6 Oct 2022 21:41:56 -0400 Subject: [PATCH 12/80] Add resource usage for non-default silo, add fleet to DB --- common/src/sql/dbinit.sql | 11 ++++ nexus/db-model/src/fleet.rs | 28 ++++++++++ nexus/db-model/src/lib.rs | 2 + nexus/db-model/src/rack.rs | 5 +- nexus/db-model/src/schema.rs | 9 ++++ nexus/src/app/rack.rs | 3 +- nexus/src/db/datastore/fleet.rs | 69 ++++++++++++++++++++++++ nexus/src/db/datastore/mod.rs | 3 +- nexus/src/db/datastore/organization.rs | 6 ++- nexus/src/db/datastore/project.rs | 7 ++- nexus/src/db/datastore/resource_usage.rs | 21 +++++--- nexus/src/db/datastore/silo.rs | 8 ++- nexus/src/populate.rs | 36 ++++++++++++- nexus/types/src/external_api/views.rs | 9 ++++ 14 files changed, 201 insertions(+), 16 deletions(-) create mode 100644 nexus/db-model/src/fleet.rs create mode 100644 nexus/src/db/datastore/fleet.rs diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index a66731bb88c..bfbb151a003 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -39,6 +39,14 @@ CREATE DATABASE omicron; CREATE USER omicron; ALTER DEFAULT PRIVILEGES GRANT INSERT, SELECT, UPDATE, DELETE ON TABLES to omicron; +-- Fleet: Represents a collection of racks +CREATE TABLE omicron.public.fleet ( + /* Identity metadata (asset) */ + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL +); + /* * Racks */ @@ -48,6 +56,9 @@ CREATE TABLE omicron.public.rack ( time_created TIMESTAMPTZ NOT NULL, time_modified TIMESTAMPTZ NOT NULL, + -- The fleet to which this rack belongs + fleet_id UUID NOT NULL, + /* * Identifies if rack management has been transferred from RSS -> Nexus. * If "false", RSS is still managing sleds, services, and DNS records. diff --git a/nexus/db-model/src/fleet.rs b/nexus/db-model/src/fleet.rs new file mode 100644 index 00000000000..56f642f7f7f --- /dev/null +++ b/nexus/db-model/src/fleet.rs @@ -0,0 +1,28 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
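+//! Database model for a fleet: the top-level collection of racks
+//! (matching the `fleet` table added to dbinit.sql above).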
+ +use crate::schema::fleet; +use db_macros::Asset; +use nexus_types::external_api::views; +use uuid::Uuid; + +/// Information about a fleet +#[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset)] +#[diesel(table_name = fleet)] +pub struct Fleet { + #[diesel(embed)] + pub identity: FleetIdentity, +} + +impl Fleet { + pub fn new(id: Uuid) -> Self { + Self { identity: FleetIdentity::new(id) } + } +} + +impl From for views::Fleet { + fn from(fleet: Fleet) -> Self { + Self { identity: views::AssetIdentityMetadata::from(&fleet) } + } +} diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index d06f7b28ba4..1727706ca0e 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -20,6 +20,7 @@ mod digest; mod disk; mod disk_state; mod external_ip; +mod fleet; mod generation; mod global_image; mod identity_provider; @@ -92,6 +93,7 @@ pub use digest::*; pub use disk::*; pub use disk_state::*; pub use external_ip::*; +pub use fleet::*; pub use generation::*; pub use global_image::*; pub use identity_provider::*; diff --git a/nexus/db-model/src/rack.rs b/nexus/db-model/src/rack.rs index 63ce64f996d..ff39773b775 100644 --- a/nexus/db-model/src/rack.rs +++ b/nexus/db-model/src/rack.rs @@ -13,15 +13,16 @@ use uuid::Uuid; pub struct Rack { #[diesel(embed)] pub identity: RackIdentity, - + pub fleet_id: Uuid, pub initialized: bool, pub tuf_base_url: Option, } impl Rack { - pub fn new(id: Uuid) -> Self { + pub fn new(id: Uuid, fleet_id: Uuid) -> Self { Self { identity: RackIdentity::new(id), + fleet_id, initialized: false, tuf_base_url: None, } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 9abfa3b7cf7..a1384fa4f9f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -350,11 +350,20 @@ table! { } } +table! { + fleet (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + } +} + table! { rack (id) { id -> Uuid, time_created -> Timestamptz, time_modified -> Timestamptz, + fleet_id -> Uuid, initialized -> Bool, tuf_base_url -> Nullable, } diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index dcc7ce92dbc..8230bf2dc26 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -43,9 +43,10 @@ impl super::Nexus { &self, opctx: &OpContext, rack_id: Uuid, + fleet_id: Uuid, ) -> Result<(), Error> { self.datastore() - .rack_insert(opctx, &db::model::Rack::new(rack_id)) + .rack_insert(opctx, &db::model::Rack::new(rack_id, fleet_id)) .await?; Ok(()) } diff --git a/nexus/src/db/datastore/fleet.rs b/nexus/src/db/datastore/fleet.rs new file mode 100644 index 00000000000..42b43d4f5be --- /dev/null +++ b/nexus/src/db/datastore/fleet.rs @@ -0,0 +1,69 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods on [`Fleet`]s. 
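+//!
+//! `fleet_insert` below is an idempotent upsert: on an ID conflict it
+//! sets the ID to itself, which (unlike ON CONFLICT DO NOTHING) still
+//! returns the existing row, so repeated population is harmless.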
+ +use super::DataStore; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::ErrorHandler; +use crate::db::identity::Asset; +use crate::db::model::Fleet; +use crate::db::pagination::paginated; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use diesel::upsert::excluded; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::ResourceType; +use uuid::Uuid; + +impl DataStore { + pub async fn fleet_list( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + use db::schema::fleet::dsl; + paginated(dsl::fleet, dsl::id, pagparams) + .select(Fleet::as_select()) + .load_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + /// Stores a new fleet in the database. + /// + /// This function is a no-op if the fleet already exists. + pub async fn fleet_insert( + &self, + opctx: &OpContext, + fleet: &Fleet, + ) -> Result { + use db::schema::fleet::dsl; + + diesel::insert_into(dsl::fleet) + .values(fleet.clone()) + .on_conflict(dsl::id) + .do_update() + // This is a no-op, since we conflicted on the ID. + .set(dsl::id.eq(excluded(dsl::id))) + .returning(Fleet::as_returning()) + .get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict( + ResourceType::Fleet, + &fleet.id().to_string(), + ), + ) + }) + } +} diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index 80e9c3c0a6e..e50e7c7ac44 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -46,6 +46,7 @@ mod dataset; mod device_auth; mod disk; mod external_ip; +mod fleet; mod global_image; mod identity_provider; mod instance; @@ -1018,7 +1019,7 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a Rack, insert it into the DB. - let rack = Rack::new(Uuid::new_v4()); + let rack = Rack::new(Uuid::new_v4(), *db::fixed_data::FLEET_ID); let result = datastore.rack_insert(&opctx, &rack).await.unwrap(); assert_eq!(result.id(), rack.id()); assert_eq!(result.initialized, false); diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index d16fec24133..b1603c05031 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -79,7 +79,11 @@ impl DataStore { // NOTE: if you do this before the org is created, it'll exist as // soon as the org does. However, that'll work better in a saga/CTE when // unwinding is built-in more naturally. - self.resource_usage_create(opctx, ResourceUsage::new(org.id())).await?; + self.resource_usage_create( + self.pool_authorized(opctx).await?, + ResourceUsage::new(org.id()), + ) + .await?; Ok(org) } diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index caf2c073f82..4e57c35fc8e 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -70,8 +70,11 @@ impl DataStore { // NOTE: if you do this before the project is created, it'll exist as // soon as the project does. However, that'll work better in a saga/CTE when // unwinding is built-in more naturally. 
- self.resource_usage_create(opctx, ResourceUsage::new(project.id())) - .await?; + self.resource_usage_create( + self.pool_authorized(opctx).await?, + ResourceUsage::new(project.id()), + ) + .await?; Ok(project) } diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index f7b745fa6ca..8a48ecde8ad 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -10,8 +10,9 @@ use crate::db; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::model::ResourceUsage; +use crate::db::pool::DbConnection; use crate::db::queries::resource_usage_update::ResourceUsageUpdate; -use async_bb8_diesel::AsyncRunQueryDsl; +use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use diesel::prelude::*; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -96,21 +97,29 @@ impl oximeter::Producer for Producer { impl DataStore { /// Create a resource_usage - pub async fn resource_usage_create( + pub async fn resource_usage_create( &self, - opctx: &OpContext, + conn: &(impl async_bb8_diesel::AsyncConnection + + Sync), resource_usage: ResourceUsage, - ) -> Result, Error> { + ) -> Result, Error> + where + ConnErr: From + Send + 'static, + PoolError: From, + { use db::schema::resource_usage::dsl; let usages: Vec = diesel::insert_into(dsl::resource_usage) .values(resource_usage) .on_conflict_do_nothing() - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel_pool( + PoolError::from(e), + ErrorHandler::Server, + ) })?; self.resource_usage_producer.append_disk_metrics(&usages); self.resource_usage_producer.append_cpu_metrics(&usages); diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 03cb2f20ff4..6348426cbf8 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -56,7 +56,7 @@ impl DataStore { info!(opctx.log, "created {} built-in silos", count); self.resource_usage_create( - opctx, + self.pool_authorized(opctx).await?, ResourceUsage::new(DEFAULT_SILO.id()), ) .await?; @@ -148,6 +148,12 @@ impl DataStore { .execute_async(&conn) .await?; + self.resource_usage_create( + &conn, + ResourceUsage::new(DEFAULT_SILO.id()), + ) + .await?; + if let Some(query) = silo_admin_group_ensure_query { query.get_result_async(&conn).await?; } diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 062e314c219..54762bc1fd2 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -266,6 +266,31 @@ impl Populator for PopulateSiloUserRoleAssignments { } } +#[derive(Debug)] +struct PopulateFleet; +impl Populator for PopulateFleet { + fn populate<'a, 'b>( + &self, + opctx: &'a OpContext, + datastore: &'a DataStore, + _args: &'a PopulateArgs, + ) -> BoxFuture<'b, Result<(), Error>> + where + 'a: 'b, + { + async { + datastore + .fleet_insert( + opctx, + &db::model::Fleet::new(*db::fixed_data::FLEET_ID), + ) + .await?; + Ok(()) + } + .boxed() + } +} + #[derive(Debug)] struct PopulateRack; impl Populator for PopulateRack { @@ -280,7 +305,13 @@ impl Populator for PopulateRack { { async { datastore - .rack_insert(opctx, &db::model::Rack::new(args.rack_id)) + .rack_insert( + opctx, + &db::model::Rack::new( + args.rack_id, + *db::fixed_data::FLEET_ID, + ), + ) .await?; let params = params::IpPoolCreate { @@ -305,13 +336,14 @@ impl Populator for PopulateRack { } 
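+// NOTE: populators are assumed to run in the order they appear in
+// ALL_POPULATORS, so PopulateFleet is listed ahead of PopulateRack:
+// the fleet row is created before the rack that references its id.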
lazy_static! { - static ref ALL_POPULATORS: [&'static dyn Populator; 7] = [ + static ref ALL_POPULATORS: [&'static dyn Populator; 8] = [ &PopulateBuiltinUsers, &PopulateBuiltinRoles, &PopulateBuiltinRoleAssignments, &PopulateBuiltinSilos, &PopulateSiloUsers, &PopulateSiloUserRoleAssignments, + &PopulateFleet, &PopulateRack, ]; } diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index aa68b82ecc5..088f7d87753 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -290,6 +290,15 @@ pub struct ExternalIp { pub kind: IpKind, } +// FLEET + +/// Client view of a [`Fleet`] +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct Fleet { + #[serde(flatten)] + pub identity: AssetIdentityMetadata, +} + // RACKS /// Client view of an [`Rack`] From 356d4fc0d5b2ebf740ea271e6e936e7e0d8d045a Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 6 Oct 2022 21:54:41 -0400 Subject: [PATCH 13/80] Silos know which fleets they belong to, fleet resource accounting --- common/src/sql/dbinit.sql | 1 + .../src/queries/resource_usage_update.rs | 9 ++++ nexus/db-model/src/schema.rs | 1 + nexus/db-model/src/silo.rs | 12 +++-- nexus/src/db/datastore/silo.rs | 6 ++- nexus/src/db/fixed_data/silo.rs | 3 +- nexus/src/db/queries/resource_usage_update.rs | 46 ++++++++++++++++--- 7 files changed, 67 insertions(+), 11 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index bfbb151a003..3904e623fcb 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -355,6 +355,7 @@ CREATE TABLE omicron.public.silo ( time_modified TIMESTAMPTZ NOT NULL, time_deleted TIMESTAMPTZ, + fleet_id UUID NOT NULL, discoverable BOOL NOT NULL, authentication_mode omicron.public.authentication_mode NOT NULL, user_provision_type omicron.public.user_provision_type NOT NULL, diff --git a/nexus/db-model/src/queries/resource_usage_update.rs b/nexus/db-model/src/queries/resource_usage_update.rs index f80e5529c8c..f221ddd117b 100644 --- a/nexus/db-model/src/queries/resource_usage_update.rs +++ b/nexus/db-model/src/queries/resource_usage_update.rs @@ -6,6 +6,7 @@ use crate::schema::organization; use crate::schema::resource_usage; +use crate::schema::silo; table! { parent_org { @@ -19,6 +20,12 @@ table! { } } +table! { + parent_fleet { + id -> Uuid, + } +} + table! { all_collections { id -> Uuid, @@ -26,10 +33,12 @@ table! { } diesel::allow_tables_to_appear_in_same_query!(organization, parent_org,); +diesel::allow_tables_to_appear_in_same_query!(silo, parent_silo,); diesel::allow_tables_to_appear_in_same_query!( resource_usage, parent_org, parent_silo, + parent_fleet, all_collections, ); diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index a1384fa4f9f..7b04587b4d0 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -197,6 +197,7 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable, + fleet_id -> Uuid, discoverable -> Bool, authentication_mode -> crate::AuthenticationModeEnum, user_provision_type -> crate::UserProvisionTypeEnum, diff --git a/nexus/db-model/src/silo.rs b/nexus/db-model/src/silo.rs index 4e43c44dd41..0f01b07a65b 100644 --- a/nexus/db-model/src/silo.rs +++ b/nexus/db-model/src/silo.rs @@ -85,6 +85,7 @@ pub struct Silo { #[diesel(embed)] identity: SiloIdentity, + pub fleet_id: Uuid, pub discoverable: bool, pub authentication_mode: AuthenticationMode, @@ -96,13 +97,18 @@ pub struct Silo { impl Silo { /// Creates a new database Silo object. 
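+    /// (All silos are currently created under the fixed `FLEET_ID` from
+    /// `db::fixed_data`; `fleet_id` is threaded through here so that
+    /// usage can roll up from silo to fleet.)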
- pub fn new(params: params::SiloCreate) -> Self { - Self::new_with_id(Uuid::new_v4(), params) + pub fn new(params: params::SiloCreate, fleet_id: Uuid) -> Self { + Self::new_with_id(Uuid::new_v4(), params, fleet_id) } - pub fn new_with_id(id: Uuid, params: params::SiloCreate) -> Self { + pub fn new_with_id( + id: Uuid, + params: params::SiloCreate, + fleet_id: Uuid, + ) -> Self { Self { identity: SiloIdentity::new(id, params.identity), + fleet_id, discoverable: params.discoverable, authentication_mode: params .identity_mode diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 6348426cbf8..f5cc43c974c 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -87,7 +87,11 @@ impl DataStore { let silo_create_query = Self::silo_create_query( opctx, - db::model::Silo::new_with_id(silo_id, new_silo_params.clone()), + db::model::Silo::new_with_id( + silo_id, + new_silo_params.clone(), + *db::fixed_data::FLEET_ID, + ), ) .await?; diff --git a/nexus/src/db/fixed_data/silo.rs b/nexus/src/db/fixed_data/silo.rs index 3c910177291..f633fa76168 100644 --- a/nexus/src/db/fixed_data/silo.rs +++ b/nexus/src/db/fixed_data/silo.rs @@ -21,6 +21,7 @@ lazy_static! { discoverable: false, identity_mode: shared::SiloIdentityMode::LocalOnly, admin_group_name: None, - } + }, + *super::FLEET_ID, ); } diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs index 84b3b2f2b52..83e7e970218 100644 --- a/nexus/src/db/queries/resource_usage_update.rs +++ b/nexus/src/db/queries/resource_usage_update.rs @@ -17,7 +17,7 @@ use diesel::{ SelectableHelper, }; use nexus_db_model::queries::resource_usage_update::{ - all_collections, parent_org, parent_silo, + all_collections, parent_fleet, parent_org, parent_silo, }; #[derive(Subquery, QueryId)] @@ -64,6 +64,29 @@ impl ParentSilo { } } +#[derive(Subquery, QueryId)] +#[subquery(name = parent_fleet)] +struct ParentFleet { + query: Box>, +} + +impl ParentFleet { + fn new(parent_silo: &ParentSilo) -> Self { + use crate::db::schema::silo::dsl; + Self { + query: Box::new( + dsl::silo + .filter(dsl::id.eq_any( + parent_silo.query_source().select(parent_silo::id), + )) + .select((ExpressionAlias::new::( + dsl::fleet_id, + ),)), + ), + } + } +} + #[derive(Subquery, QueryId)] #[subquery(name = all_collections)] struct AllCollections { @@ -75,6 +98,7 @@ impl AllCollections { project_id: uuid::Uuid, parent_org: &ParentOrg, parent_silo: &ParentSilo, + parent_fleet: &ParentFleet, ) -> Self { Self { query: Box::new( @@ -92,9 +116,12 @@ impl AllCollections { ExpressionAlias::new::( parent_silo::id, ), - ))), // TODO: Presumably, we could also update the fleet containing - // the silo here. However, such an object does not exist in the - // database at the time of writing this comment. 
+ ))) + .union(parent_fleet.query_source().select(( + ExpressionAlias::new::( + parent_fleet::id, + ), + ))), ), } } @@ -115,6 +142,7 @@ impl ResourceUsageUpdate { // - Project // - Organization // - Silo + // - Fleet fn apply_update(project_id: uuid::Uuid, values: V) -> Self where V: diesel::AsChangeset, @@ -123,8 +151,13 @@ impl ResourceUsageUpdate { { let parent_org = ParentOrg::new(project_id); let parent_silo = ParentSilo::new(&parent_org); - let all_collections = - AllCollections::new(project_id, &parent_org, &parent_silo); + let parent_fleet = ParentFleet::new(&parent_silo); + let all_collections = AllCollections::new( + project_id, + &parent_org, + &parent_silo, + &parent_fleet, + ); use resource_usage::dsl; @@ -140,6 +173,7 @@ impl ResourceUsageUpdate { let cte = CteBuilder::new() .add_subquery(parent_org) .add_subquery(parent_silo) + .add_subquery(parent_fleet) .add_subquery(all_collections) .build(final_update); From ba874bb8279f39c53fafa1fc6b8bb45392473c81 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 6 Oct 2022 23:08:53 -0400 Subject: [PATCH 14/80] Okay, fleet 'resource_usage' now created / tested, should be working --- nexus/src/db/datastore/organization.rs | 6 +----- nexus/src/db/datastore/project.rs | 7 ++----- nexus/src/db/datastore/resource_usage.rs | 11 ++++++++++- nexus/src/db/datastore/silo.rs | 4 ++-- nexus/src/populate.rs | 8 ++++---- nexus/tests/integration_tests/disks.rs | 19 +++++++++++++++++++ 6 files changed, 38 insertions(+), 17 deletions(-) diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index b1603c05031..d16fec24133 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -79,11 +79,7 @@ impl DataStore { // NOTE: if you do this before the org is created, it'll exist as // soon as the org does. However, that'll work better in a saga/CTE when // unwinding is built-in more naturally. - self.resource_usage_create( - self.pool_authorized(opctx).await?, - ResourceUsage::new(org.id()), - ) - .await?; + self.resource_usage_create(opctx, ResourceUsage::new(org.id())).await?; Ok(org) } diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index 4e57c35fc8e..caf2c073f82 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -70,11 +70,8 @@ impl DataStore { // NOTE: if you do this before the project is created, it'll exist as // soon as the project does. However, that'll work better in a saga/CTE when // unwinding is built-in more naturally. 
- self.resource_usage_create( - self.pool_authorized(opctx).await?, - ResourceUsage::new(project.id()), - ) - .await?; + self.resource_usage_create(opctx, ResourceUsage::new(project.id())) + .await?; Ok(project) } diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index 8a48ecde8ad..4c0df672a50 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -97,7 +97,16 @@ impl oximeter::Producer for Producer { impl DataStore { /// Create a resource_usage - pub async fn resource_usage_create( + pub async fn resource_usage_create( + &self, + opctx: &OpContext, + resource_usage: ResourceUsage, + ) -> Result, Error> { + let pool = self.pool_authorized(opctx).await?; + self.resource_usage_create_on_connection(pool, resource_usage).await + } + + pub(crate) async fn resource_usage_create_on_connection( &self, conn: &(impl async_bb8_diesel::AsyncConnection + Sync), diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index f5cc43c974c..93e939f9180 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -56,7 +56,7 @@ impl DataStore { info!(opctx.log, "created {} built-in silos", count); self.resource_usage_create( - self.pool_authorized(opctx).await?, + opctx, ResourceUsage::new(DEFAULT_SILO.id()), ) .await?; @@ -152,7 +152,7 @@ impl DataStore { .execute_async(&conn) .await?; - self.resource_usage_create( + self.resource_usage_create_on_connection( &conn, ResourceUsage::new(DEFAULT_SILO.id()), ) diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 54762bc1fd2..40362250336 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -279,12 +279,12 @@ impl Populator for PopulateFleet { 'a: 'b, { async { + let id = *db::fixed_data::FLEET_ID; + datastore.fleet_insert(opctx, &db::model::Fleet::new(id)).await?; datastore - .fleet_insert( - opctx, - &db::model::Fleet::new(*db::fixed_data::FLEET_ID), - ) + .resource_usage_create(opctx, db::model::ResourceUsage::new(id)) .await?; + Ok(()) } .boxed() diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 96cb1e77b9d..8249f5e6c89 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -33,6 +33,7 @@ use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::Name; use omicron_common::backoff; +use omicron_nexus::db::fixed_data::{silo::SILO_ID, FLEET_ID}; use omicron_nexus::TestInterfaces as _; use omicron_nexus::{context::OpContext, external_api::params, Nexus}; use oximeter::types::Datum; @@ -926,6 +927,12 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + let resource_usage = + datastore.resource_usage_get(&opctx, *SILO_ID).await.unwrap(); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + let resource_usage = + datastore.resource_usage_get(&opctx, *FLEET_ID).await.unwrap(); + assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); // Ask for a 1 gibibyte disk in the first project. 
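+    // From this point on, bytes charged to the project should also show
+    // up on the parent silo and fleet usage rows (asserted below), since
+    // the update CTE now walks project -> organization -> silo -> fleet.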
// @@ -968,6 +975,18 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { resource_usage.physical_disk_bytes_provisioned, 3 * disk_size.to_bytes() as i64 ); + let resource_usage = + datastore.resource_usage_get(&opctx, *SILO_ID).await.unwrap(); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); + let resource_usage = + datastore.resource_usage_get(&opctx, *FLEET_ID).await.unwrap(); + assert_eq!( + resource_usage.physical_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 + ); // Ask for a 1 gibibyte disk in the second project. // From e0f9919407f55faf05e1bab32528bdc6cd2e2763 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 6 Oct 2022 23:15:30 -0400 Subject: [PATCH 15/80] Actually use CPU accounting saga node --- nexus/src/app/sagas/instance_create.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 86faa5753b7..ba5149cda3e 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -134,6 +134,7 @@ impl NexusSaga for SagaInstanceCreate { fn register_actions(registry: &mut super::ActionRegistry) { registry.register(Arc::clone(&*ALLOC_SERVER)); + registry.register(Arc::clone(&*RESOURCES_ACCOUNT)); registry.register(Arc::clone(&*ALLOC_PROPOLIS_IP)); registry.register(Arc::clone(&*CREATE_INSTANCE_RECORD)); registry.register(Arc::clone(&*CREATE_NETWORK_INTERFACE)); @@ -169,6 +170,12 @@ impl NexusSaga for SagaInstanceCreate { ALLOC_SERVER.as_ref(), )); + builder.append(Node::action( + "no-result", + "ResourcesAccount", + RESOURCES_ACCOUNT.as_ref(), + )); + builder.append(Node::action( "propolis_ip", "AllocatePropolisIp", From 229480b4f9c421dee4d57c0173325fff6f2f6d03 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 6 Oct 2022 23:36:57 -0400 Subject: [PATCH 16/80] Add test for CPU usage --- nexus/tests/integration_tests/instances.rs | 63 ++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 170e03cfe15..c50e6d53e96 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -25,6 +25,7 @@ use omicron_common::api::external::InstanceState; use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Name; use omicron_common::api::external::NetworkInterface; +use omicron_nexus::context::OpContext; use omicron_nexus::external_api::shared::IpKind; use omicron_nexus::external_api::shared::IpRange; use omicron_nexus::external_api::shared::Ipv4Range; @@ -447,6 +448,68 @@ async fn test_instances_create_reboot_halt( .unwrap(); } +#[nexus_test] +async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let apictx = &cptestctx.server.apictx; + let nexus = &apictx.nexus; + let datastore = nexus.datastore(); + + // Create an IP pool and project that we'll use for testing. 
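+    // (create_instance is assumed to provision a 4-vCPU instance; that is
+    // where the expected cpus_provisioned value of 4 below comes from.)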
+ create_ip_pool(&client, POOL_NAME, None, None).await; + create_organization(&client, ORGANIZATION_NAME).await; + let url_instances = format!( + "/organizations/{}/projects/{}/instances", + ORGANIZATION_NAME, PROJECT_NAME + ); + let project_id = create_project(&client, ORGANIZATION_NAME, PROJECT_NAME) + .await + .identity + .id; + + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + let resource_usage = + datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + assert_eq!(resource_usage.cpus_provisioned, 0); + + // Create an instance. + let instance_url = format!("{}/just-rainsticks", url_instances); + create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, "just-rainsticks") + .await; + let resource_usage = + datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + assert_eq!(resource_usage.cpus_provisioned, 4); + + // Stop the instance + let instance = + instance_post(&client, &instance_url, InstanceOp::Stop).await; + instance_simulate(nexus, &instance.identity.id).await; + let instance = instance_get(&client, &instance_url).await; + assert_eq!(instance.runtime.run_state, InstanceState::Stopped); + // NOTE: I think it's arguably "more correct" to identify that the + // number of CPUs being used by guests at this point is actually "0", + // not "4", because the instance is stopped. + // + // However, for implementation reasons, this is complicated (we have a + // tendency to update the runtime without checking the prior state, which + // makes edge-triggered behavior trickier to notice). + let resource_usage = + datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + assert_eq!(resource_usage.cpus_provisioned, 4); + + // Stop the instance + NexusRequest::object_delete(client, &instance_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + + let resource_usage = + datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + assert_eq!(resource_usage.cpus_provisioned, 0); +} + #[nexus_test] async fn test_instances_create_stopped_start( cptestctx: &ControlPlaneTestContext, From d2c5d9de9a4b3a074ea3fc04b494706f708e90f7 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 00:31:22 -0400 Subject: [PATCH 17/80] RAM provisionining metrics are plumbed --- common/src/sql/dbinit.sql | 5 +++- nexus/db-model/src/resource_usage.rs | 8 +++++- nexus/db-model/src/schema.rs | 1 + nexus/src/app/instance.rs | 4 ++- nexus/src/app/sagas/instance_create.rs | 6 ++-- nexus/src/db/datastore/resource_usage.rs | 28 +++++++++++++------ nexus/src/db/queries/resource_usage_update.rs | 11 ++++++-- nexus/src/external_api/http_entrypoints.rs | 18 +++++++----- nexus/tests/integration_tests/instances.rs | 12 +++++++- openapi/nexus.json | 5 +--- 10 files changed, 71 insertions(+), 27 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 3904e623fcb..6ed4cc94810 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -156,7 +156,10 @@ CREATE TABLE omicron.public.resource_usage ( physical_disk_bytes_provisioned INT8 NOT NULL, -- The number of CPUs provisioned by VMs. - cpus_provisioned INT8 NOT NULL + cpus_provisioned INT8 NOT NULL, + + -- The amount of RAM provisioned by VMs. 
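+    -- Stored in bytes; Nexus converts the instance's ByteCount memory to
+    -- an i64 (memory.to_bytes()) when updating this column.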
+ ram_provisioned INT8 NOT NULL ); /* diff --git a/nexus/db-model/src/resource_usage.rs b/nexus/db-model/src/resource_usage.rs index 9fe781580e8..4908c65960a 100644 --- a/nexus/db-model/src/resource_usage.rs +++ b/nexus/db-model/src/resource_usage.rs @@ -13,10 +13,16 @@ pub struct ResourceUsage { pub physical_disk_bytes_provisioned: i64, pub cpus_provisioned: i64, + pub ram_provisioned: i64, } impl ResourceUsage { pub fn new(id: Uuid) -> Self { - Self { id, physical_disk_bytes_provisioned: 0, cpus_provisioned: 0 } + Self { + id, + physical_disk_bytes_provisioned: 0, + cpus_provisioned: 0, + ram_provisioned: 0, + } } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7b04587b4d0..d53003bfd31 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -412,6 +412,7 @@ table! { id -> Uuid, physical_disk_bytes_provisioned -> Int8, cpus_provisioned -> Int8, + ram_provisioned -> Int8, } } diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index f49edc12b6c..888924aadeb 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -251,10 +251,12 @@ impl super::Nexus { .project_delete_instance(opctx, &authz_instance) .await?; self.db_datastore - .resource_usage_update_cpus( + .resource_usage_update_cpus_and_ram( &opctx, project.id(), -i64::from(instance.runtime_state.ncpus.0 .0), + -i64::try_from(instance.runtime_state.memory.to_bytes()) + .unwrap(), ) .await?; self.db_datastore diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index ba5149cda3e..297a0d14696 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -862,10 +862,11 @@ async fn sic_account_resources( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_cpus( + .resource_usage_update_cpus_and_ram( &opctx, params.project_id, i64::from(params.create_params.ncpus.0), + i64::try_from(params.create_params.memory.to_bytes()).unwrap(), ) .await .map_err(ActionError::action_failed)?; @@ -882,10 +883,11 @@ async fn sic_account_resources_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_cpus( + .resource_usage_update_cpus_and_ram( &opctx, params.project_id, -i64::from(params.create_params.ncpus.0), + -i64::try_from(params.create_params.memory.to_bytes()).unwrap(), ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index 4c0df672a50..865068f953d 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -39,6 +39,12 @@ struct CpusProvisioned { cpus: i64, } +#[derive(Debug, Clone, Metric)] +struct RamProvisioned { + #[datum] + bytes: i64, +} + #[derive(Debug, Default, Clone)] pub struct Producer { samples: Arc>>, @@ -74,6 +80,12 @@ impl Producer { &CpusProvisioned { cpus: usage.cpus_provisioned }, ) }) + .chain(usages.iter().map(|usage| { + Sample::new( + &CollectionTarget { id: usage.id }, + &RamProvisioned { bytes: usage.ram_provisioned }, + ) + })) .collect::>(); self.append(new_samples); @@ -188,19 +200,19 @@ impl DataStore { Ok(usages) } - pub async fn resource_usage_update_cpus( + pub async fn resource_usage_update_cpus_and_ram( &self, opctx: &OpContext, project_id: Uuid, cpus_diff: i64, + ram_diff: i64, ) -> Result, Error> { - let usages = - ResourceUsageUpdate::new_update_cpus(project_id, cpus_diff) - 
.get_results_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + let usages = ResourceUsageUpdate::new_update_cpus_and_ram( + project_id, cpus_diff, ram_diff, + ) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; self.resource_usage_producer.append_cpu_metrics(&usages); Ok(usages) } diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs index 83e7e970218..7f931fa56d6 100644 --- a/nexus/src/db/queries/resource_usage_update.rs +++ b/nexus/src/db/queries/resource_usage_update.rs @@ -192,11 +192,18 @@ impl ResourceUsageUpdate { ) } - pub fn new_update_cpus(project_id: uuid::Uuid, cpus_diff: i64) -> Self { + pub fn new_update_cpus_and_ram( + project_id: uuid::Uuid, + cpus_diff: i64, + ram_diff: i64, + ) -> Self { use resource_usage::dsl; Self::apply_update( project_id, - dsl::cpus_provisioned.eq(dsl::cpus_provisioned + cpus_diff), + ( + dsl::cpus_provisioned.eq(dsl::cpus_provisioned + cpus_diff), + dsl::ram_provisioned.eq(dsl::ram_provisioned + ram_diff), + ), ) } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 26bd16f2df4..8814583ac69 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -4044,11 +4044,8 @@ pub struct ResourceUtilization { #[serde(rename_all = "snake_case")] pub enum ResourceName { PhysicalDiskSpaceProvisioned, - PhysicalDiskSpaceCapacity, CpusProvisioned, - CpuCapacity, RamProvisioned, - RamCapacity, } #[derive(Deserialize, JsonSchema)] @@ -4089,7 +4086,6 @@ async fn system_metrics_list( ) .await? } - ResourceName::PhysicalDiskSpaceCapacity => todo!(), ResourceName::CpusProvisioned => { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; nexus @@ -4101,9 +4097,17 @@ async fn system_metrics_list( ) .await? } - ResourceName::CpuCapacity => todo!(), - ResourceName::RamProvisioned => todo!(), - ResourceName::RamCapacity => todo!(), + ResourceName::RamProvisioned => { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + nexus + .select_timeseries( + "collection_target:ram_provisioned", + &[&format!("id=={}", query.id)], + query.pagination, + limit, + ) + .await? + } }; Ok(HttpResponseOk(result)) diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index c50e6d53e96..4522ac5ad9d 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -472,6 +472,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let resource_usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); assert_eq!(resource_usage.cpus_provisioned, 0); + assert_eq!(resource_usage.ram_provisioned, 0); // Create an instance. 
     let instance_url = format!("{}/just-rainsticks", url_instances);
@@ -480,6 +481,10 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) {
     let resource_usage =
         datastore.resource_usage_get(&opctx, project_id).await.unwrap();
     assert_eq!(resource_usage.cpus_provisioned, 4);
+    assert_eq!(
+        resource_usage.ram_provisioned,
+        i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(),
+    );
 
     // Stop the instance
     let instance =
@@ -489,7 +494,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) {
     assert_eq!(instance.runtime.run_state, InstanceState::Stopped);
     // NOTE: I think it's arguably "more correct" to identify that the
     // number of CPUs being used by guests at this point is actually "0",
-    // not "4", because the instance is stopped.
+    // not "4", because the instance is stopped (same re: RAM usage).
     //
     // However, for implementation reasons, this is complicated (we have a
     // tendency to update the runtime without checking the prior state, which
@@ -497,6 +502,10 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) {
     let resource_usage =
         datastore.resource_usage_get(&opctx, project_id).await.unwrap();
     assert_eq!(resource_usage.cpus_provisioned, 4);
+    assert_eq!(
+        resource_usage.ram_provisioned,
+        i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(),
+    );
 
     // Delete the instance
     NexusRequest::object_delete(client, &instance_url)
@@ -508,6 +517,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) {
     let resource_usage =
         datastore.resource_usage_get(&opctx, project_id).await.unwrap();
     assert_eq!(resource_usage.cpus_provisioned, 0);
+    assert_eq!(resource_usage.ram_provisioned, 0);
 }
 
 #[nexus_test]
diff --git a/openapi/nexus.json b/openapi/nexus.json
index 58d62ec2a87..cf3fef5205c 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -12291,11 +12291,8 @@
           "type": "string",
           "enum": [
             "physical_disk_space_provisioned",
-            "physical_disk_space_capacity",
             "cpus_provisioned",
-            "cpu_capacity",
-            "ram_provisioned",
-            "ram_capacity"
+            "ram_provisioned"
           ]
         }
       }

From 2d6f145d064feb8fbd30e1b2a5df19eceb94ab8f Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Fri, 7 Oct 2022 12:02:02 -0400
Subject: [PATCH 18/80] minor docs / label updates

---
 nexus/src/app/sagas/instance_create.rs   | 2 +-
 nexus/src/db/datastore/resource_usage.rs | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs
index 297a0d14696..00140cd07a4 100644
--- a/nexus/src/app/sagas/instance_create.rs
+++ b/nexus/src/app/sagas/instance_create.rs
@@ -80,7 +80,7 @@ lazy_static! {
         sic_alloc_server
     );
     static ref RESOURCES_ACCOUNT: NexusAction = ActionFunc::new_action(
-        "instance.account-resources",
+        "instance-create.account-resources",
         sic_account_resources,
         sic_account_resources_undo,
     );
diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs
index 865068f953d..fe77f37836b 100644
--- a/nexus/src/db/datastore/resource_usage.rs
+++ b/nexus/src/db/datastore/resource_usage.rs
@@ -183,6 +183,7 @@ impl DataStore {
         Ok(())
     }
 
+    /// Transitively updates all provisioned disk usage from project -> fleet.
     pub async fn resource_usage_update_disk(
         &self,
         opctx: &OpContext,
@@ -200,6 +201,7 @@ impl DataStore {
         Ok(usages)
     }
 
+    /// Transitively updates all CPU/RAM usage from project -> fleet.
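Here "transitively" means the single update touches one row per layer of the hierarchy: the project, its parent organization, that organization's silo, and the fleet. A minimal sketch of the affected row set, with hypothetical parent-lookup helpers standing in for the joins the real query performs in the database:

    use uuid::Uuid;

    // Hypothetical helpers; the real update resolves these parents via joins.
    fn rows_touched(
        project_id: Uuid,
        org_of: impl Fn(Uuid) -> Uuid,
        silo_of: impl Fn(Uuid) -> Uuid,
        fleet_id: Uuid,
    ) -> [Uuid; 4] {
        let org_id = org_of(project_id);
        let silo_id = silo_of(org_id);
        // Each of these resource_usage rows receives the same signed delta.
        [project_id, org_id, silo_id, fleet_id]
    }

Applying the delta to all four rows in one query keeps the rollups consistent without a multi-statement transaction.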
pub async fn resource_usage_update_cpus_and_ram( &self, opctx: &OpContext, From df4bd2f6732ff1cf73efa3c76c7145d8c15da322 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 16:49:19 -0400 Subject: [PATCH 19/80] physical_disk accounting updated to virtual_disk accounting --- common/src/sql/dbinit.sql | 2 +- nexus/db-model/src/region.rs | 5 - nexus/db-model/src/resource_usage.rs | 4 +- nexus/db-model/src/schema.rs | 2 +- nexus/src/app/disk.rs | 9 ++ nexus/src/app/sagas/disk_create.rs | 37 ++---- nexus/src/app/sagas/disk_delete.rs | 66 +++++++++- nexus/src/app/sagas/snapshot_create.rs | 120 ++++++++---------- nexus/src/app/sagas/volume_delete.rs | 19 --- nexus/src/db/datastore/disk.rs | 6 +- nexus/src/db/datastore/resource_usage.rs | 6 +- nexus/src/db/queries/resource_usage_update.rs | 4 +- nexus/src/external_api/http_entrypoints.rs | 6 +- nexus/tests/integration_tests/disks.rs | 52 ++++---- nexus/tests/integration_tests/snapshots.rs | 22 ++-- openapi/nexus.json | 2 +- 16 files changed, 192 insertions(+), 170 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 6ed4cc94810..0b3db9aafed 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -153,7 +153,7 @@ CREATE TABLE omicron.public.resource_usage ( -- The amount of physical disk space which has been provisioned -- on behalf of the collection. - physical_disk_bytes_provisioned INT8 NOT NULL, + virtual_disk_bytes_provisioned INT8 NOT NULL, -- The number of CPUs provisioned by VMs. cpus_provisioned INT8 NOT NULL, diff --git a/nexus/db-model/src/region.rs b/nexus/db-model/src/region.rs index 349972ec510..5fcbaddb4a9 100644 --- a/nexus/db-model/src/region.rs +++ b/nexus/db-model/src/region.rs @@ -75,9 +75,4 @@ impl Region { // external, customer-supplied keys is a non-requirement. true } - pub fn size_used(&self) -> i64 { - self.extent_count() - * self.blocks_per_extent() - * self.block_size().to_bytes() as i64 - } } diff --git a/nexus/db-model/src/resource_usage.rs b/nexus/db-model/src/resource_usage.rs index 4908c65960a..74d7836a525 100644 --- a/nexus/db-model/src/resource_usage.rs +++ b/nexus/db-model/src/resource_usage.rs @@ -11,7 +11,7 @@ use uuid::Uuid; pub struct ResourceUsage { pub id: Uuid, - pub physical_disk_bytes_provisioned: i64, + pub virtual_disk_bytes_provisioned: i64, pub cpus_provisioned: i64, pub ram_provisioned: i64, } @@ -20,7 +20,7 @@ impl ResourceUsage { pub fn new(id: Uuid) -> Self { Self { id, - physical_disk_bytes_provisioned: 0, + virtual_disk_bytes_provisioned: 0, cpus_provisioned: 0, ram_provisioned: 0, } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index d53003bfd31..85a70780aea 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -410,7 +410,7 @@ table! { table! 
{ resource_usage { id -> Uuid, - physical_disk_bytes_provisioned -> Int8, + virtual_disk_bytes_provisioned -> Int8, cpus_provisioned -> Int8, ram_provisioned -> Int8, } diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 57fe57c224b..a672f5f7837 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -511,6 +511,15 @@ impl super::Nexus { .fetch() .await?; + // TODO: This should exist within a saga + self.db_datastore + .resource_usage_update_disk( + &opctx, + project.id(), + -i64::try_from(db_snapshot.size.to_bytes()).unwrap(), + ) + .await?; + self.db_datastore .project_delete_snapshot(opctx, &authz_snapshot, &db_snapshot) .await?; diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 140ed04e16f..a3044ca083c 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -47,10 +47,10 @@ lazy_static! { sdc_alloc_regions, sdc_alloc_regions_undo, ); - static ref REGIONS_ACCOUNT: NexusAction = ActionFunc::new_action( - "disk-create.account-regions", - sdc_account_regions, - sdc_account_regions_undo, + static ref SPACE_ACCOUNT: NexusAction = ActionFunc::new_action( + "disk-create.account-space", + sdc_account_space, + sdc_account_space_undo, ); static ref REGIONS_ENSURE: NexusAction = new_action_noop_undo("disk-create.regions-ensure", sdc_regions_ensure,); @@ -76,7 +76,7 @@ impl NexusSaga for SagaDiskCreate { fn register_actions(registry: &mut ActionRegistry) { registry.register(Arc::clone(&*CREATE_DISK_RECORD)); registry.register(Arc::clone(&*REGIONS_ALLOC)); - registry.register(Arc::clone(&*REGIONS_ACCOUNT)); + registry.register(Arc::clone(&*SPACE_ACCOUNT)); registry.register(Arc::clone(&*REGIONS_ENSURE)); registry.register(Arc::clone(&*CREATE_VOLUME_RECORD)); registry.register(Arc::clone(&*FINALIZE_DISK_RECORD)); @@ -112,8 +112,8 @@ impl NexusSaga for SagaDiskCreate { builder.append(Node::action( "no-result", - "RegionsAccount", - REGIONS_ACCOUNT.as_ref(), + "SpaceAccount", + SPACE_ACCOUNT.as_ref(), )); builder.append(Node::action( @@ -274,33 +274,21 @@ async fn sdc_alloc_regions_undo( Ok(()) } -fn get_space_used_by_allocated_regions( - sagactx: &NexusActionContext, -) -> Result { - let space_used = sagactx - .lookup::>( - "datasets_and_regions", - )? 
- .into_iter() - .map(|(_, region)| region.size_used()) - .fold(0, |acc, x| acc + x); - Ok(space_used) -} - // TODO: Not yet idempotent -async fn sdc_account_regions( +async fn sdc_account_space( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; + let disk_created = sagactx.lookup::("created_disk")?; let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() .resource_usage_update_disk( &opctx, params.project_id, - get_space_used_by_allocated_regions(&sagactx)?, + i64::try_from(disk_created.size.to_bytes()).unwrap(), ) .await .map_err(ActionError::action_failed)?; @@ -308,19 +296,20 @@ async fn sdc_account_regions( } // TODO: Not yet idempotent -async fn sdc_account_regions_undo( +async fn sdc_account_space_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; + let disk_created = sagactx.lookup::("created_disk")?; let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() .resource_usage_update_disk( &opctx, params.project_id, - -get_space_used_by_allocated_regions(&sagactx)?, + -i64::try_from(disk_created.size.to_bytes()).unwrap(), ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 8bb62c1a270..5c7fb4fc7bf 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -8,12 +8,14 @@ use super::NexusSaga; use crate::app::sagas::NexusAction; use crate::authn; use crate::context::OpContext; +use crate::db; use lazy_static::lazy_static; use serde::Deserialize; use serde::Serialize; use std::sync::Arc; use steno::new_action_noop_undo; use steno::ActionError; +use steno::ActionFunc; use steno::Node; use uuid::Uuid; @@ -36,6 +38,11 @@ lazy_static! { // underlying regions. 
sdd_delete_disk_record ); + static ref SPACE_ACCOUNT: NexusAction = ActionFunc::new_action( + "disk-delete.account-space", + sdd_account_space, + sdd_account_space_undo, + ); static ref DELETE_VOLUME: NexusAction = new_action_noop_undo( "disk-delete.delete-volume", sdd_delete_volume @@ -52,6 +59,7 @@ impl NexusSaga for SagaDiskDelete { fn register_actions(registry: &mut ActionRegistry) { registry.register(Arc::clone(&*DELETE_DISK_RECORD)); + registry.register(Arc::clone(&*SPACE_ACCOUNT)); registry.register(Arc::clone(&*DELETE_VOLUME)); } @@ -60,10 +68,15 @@ impl NexusSaga for SagaDiskDelete { mut builder: steno::DagBuilder, ) -> Result { builder.append(Node::action( - "volume_id", + "deleted_disk", "DeleteDiskRecord", DELETE_DISK_RECORD.as_ref(), )); + builder.append(Node::action( + "no-result", + "SpaceAccount", + SPACE_ACCOUNT.as_ref(), + )); builder.append(Node::action( "no_result", "DeleteVolume", @@ -77,16 +90,58 @@ impl NexusSaga for SagaDiskDelete { async fn sdd_delete_disk_record( sagactx: NexusActionContext, -) -> Result { +) -> Result { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let volume_id = osagactx + let disk = osagactx .datastore() .project_delete_disk_no_auth(¶ms.disk_id) .await .map_err(ActionError::action_failed)?; - Ok(volume_id) + Ok(disk) +} + +// TODO: Not yet idempotent +async fn sdd_account_space( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let deleted_disk = sagactx.lookup::("deleted_disk")?; + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + osagactx + .datastore() + .resource_usage_update_disk( + &opctx, + params.project_id, + -i64::try_from(deleted_disk.size.to_bytes()).unwrap(), + ) + .await + .map_err(ActionError::action_failed)?; + Ok(()) +} + +// TODO: Not yet idempotent +async fn sdd_account_space_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let deleted_disk = sagactx.lookup::("deleted_disk")?; + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + osagactx + .datastore() + .resource_usage_update_disk( + &opctx, + params.project_id, + i64::try_from(deleted_disk.size.to_bytes()).unwrap(), + ) + .await + .map_err(ActionError::action_failed)?; + Ok(()) } async fn sdd_delete_volume( @@ -95,7 +150,8 @@ async fn sdd_delete_volume( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); - let volume_id = sagactx.lookup::("volume_id")?; + let volume_id = + sagactx.lookup::("deleted_disk")?.volume_id; osagactx .nexus() .volume_delete(&opctx, params.project_id, volume_id) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 314be6a4192..d405b48d7db 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -131,11 +131,6 @@ lazy_static! { ssc_alloc_regions, ssc_alloc_regions_undo, ); - static ref REGIONS_ACCOUNT: NexusAction = ActionFunc::new_action( - "snapshot-create.account-regions", - ssc_account_regions, - ssc_account_regions_undo, - ); static ref REGIONS_ENSURE: NexusAction = new_action_noop_undo( "snapshot-create.regions-ensure", ssc_regions_ensure, @@ -151,6 +146,11 @@ lazy_static! 
{ ssc_create_snapshot_record, ssc_create_snapshot_record_undo, ); + static ref SPACE_ACCOUNT: NexusAction = ActionFunc::new_action( + "snapshot-create.account-space", + ssc_account_space, + ssc_account_space_undo, + ); static ref SEND_SNAPSHOT_REQUEST: NexusAction = new_action_noop_undo( "snapshot-create.send-snapshot-request", ssc_send_snapshot_request, @@ -180,10 +180,10 @@ impl NexusSaga for SagaSnapshotCreate { fn register_actions(registry: &mut ActionRegistry) { registry.register(Arc::clone(&*REGIONS_ALLOC)); - registry.register(Arc::clone(&*REGIONS_ACCOUNT)); registry.register(Arc::clone(&*REGIONS_ENSURE)); registry.register(Arc::clone(&*CREATE_DESTINATION_VOLUME_RECORD)); registry.register(Arc::clone(&*CREATE_SNAPSHOT_RECORD)); + registry.register(Arc::clone(&*SPACE_ACCOUNT)); registry.register(Arc::clone(&*SEND_SNAPSHOT_REQUEST)); registry.register(Arc::clone(&*START_RUNNING_SNAPSHOT)); registry.register(Arc::clone(&*CREATE_VOLUME_RECORD)); @@ -219,11 +219,6 @@ impl NexusSaga for SagaSnapshotCreate { "RegionsAlloc", REGIONS_ALLOC.as_ref(), )); - builder.append(Node::action( - "no-result", - "RegionsAccount", - REGIONS_ACCOUNT.as_ref(), - )); builder.append(Node::action( "regions_ensure", "RegionsEnsure", @@ -243,6 +238,12 @@ impl NexusSaga for SagaSnapshotCreate { CREATE_SNAPSHOT_RECORD.as_ref(), )); + builder.append(Node::action( + "no-result", + "RegionsAccount", + SPACE_ACCOUNT.as_ref(), + )); + // Send a snapshot request to a sled-agent builder.append(Node::action( "snapshot_request", @@ -340,59 +341,6 @@ async fn ssc_alloc_regions_undo( Ok(()) } -fn get_space_used_by_allocated_regions( - sagactx: &NexusActionContext, -) -> Result { - let space_used = sagactx - .lookup::>( - "datasets_and_regions", - )? - .into_iter() - .map(|(_, region)| region.size_used()) - .fold(0, |acc, x| acc + x); - Ok(space_used) -} - -// TODO: Not yet idempotent -async fn ssc_account_regions( - sagactx: NexusActionContext, -) -> Result<(), ActionError> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - - let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); - osagactx - .datastore() - .resource_usage_update_disk( - &opctx, - params.project_id, - get_space_used_by_allocated_regions(&sagactx)?, - ) - .await - .map_err(ActionError::action_failed)?; - Ok(()) -} - -// TODO: Not yet idempotent -async fn ssc_account_regions_undo( - sagactx: NexusActionContext, -) -> Result<(), anyhow::Error> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - - let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); - osagactx - .datastore() - .resource_usage_update_disk( - &opctx, - params.project_id, - -get_space_used_by_allocated_regions(&sagactx)?, - ) - .await - .map_err(ActionError::action_failed)?; - Ok(()) -} - async fn ssc_regions_ensure( sagactx: NexusActionContext, ) -> Result { @@ -595,6 +543,50 @@ async fn ssc_create_snapshot_record_undo( Ok(()) } +// TODO: Not yet idempotent +async fn ssc_account_space( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let snapshot_created = + sagactx.lookup::("created_snapshot")?; + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + osagactx + .datastore() + .resource_usage_update_disk( + &opctx, + params.project_id, + i64::try_from(snapshot_created.size.to_bytes()).unwrap(), + ) + .await + .map_err(ActionError::action_failed)?; + Ok(()) +} + +// TODO: Not yet 
idempotent +async fn ssc_account_space_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let snapshot_created = + sagactx.lookup::("created_snapshot")?; + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); + osagactx + .datastore() + .resource_usage_update_disk( + &opctx, + params.project_id, + -i64::try_from(snapshot_created.size.to_bytes()).unwrap(), + ) + .await + .map_err(ActionError::action_failed)?; + Ok(()) +} + async fn ssc_send_snapshot_request( sagactx: NexusActionContext, ) -> Result<(), ActionError> { diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index bd783f634cc..15044876946 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -30,7 +30,6 @@ use super::NexusActionContext; use super::NexusSaga; use crate::app::sagas::NexusAction; use crate::authn; -use crate::context::OpContext; use crate::db::datastore::CrucibleResources; use lazy_static::lazy_static; use nexus_types::identity::Asset; @@ -171,7 +170,6 @@ async fn svd_delete_crucible_regions( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; let crucible_resources_to_delete = sagactx.lookup::("crucible_resources_to_delete")?; @@ -192,23 +190,6 @@ async fn svd_delete_crucible_regions( .map(|(_, r)| r.id()) .collect(); - // TODO: This accounting is not yet idempotent - let space_used = crucible_resources_to_delete - .datasets_and_regions - .iter() - .fold(0, |acc, (_, r)| acc + r.size_used()); - let opctx = - OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); - osagactx - .datastore() - .resource_usage_update_disk( - &opctx, - params.project_id, - -space_used, - ) - .await - .map_err(ActionError::action_failed)?; - osagactx .datastore() .regions_hard_delete(region_ids_to_delete) diff --git a/nexus/src/db/datastore/disk.rs b/nexus/src/db/datastore/disk.rs index e6a6f2d4eee..781fbc20942 100644 --- a/nexus/src/db/datastore/disk.rs +++ b/nexus/src/db/datastore/disk.rs @@ -469,7 +469,7 @@ impl DataStore { pub async fn project_delete_disk_no_auth( &self, disk_id: &Uuid, - ) -> Result { + ) -> Result { use db::schema::disk::dsl; let pool = self.pool(); let now = Utc::now(); @@ -504,7 +504,7 @@ impl DataStore { })?; match result.status { - UpdateStatus::Updated => Ok(result.found.volume_id), + UpdateStatus::Updated => Ok(result.found), UpdateStatus::NotUpdatedButExists => { let disk = result.found; let disk_state = disk.state(); @@ -514,7 +514,7 @@ impl DataStore { { // To maintain idempotency, if the disk has already been // destroyed, don't throw an error. 
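That idempotency rule is also why this function now returns the whole Disk rather than just a volume UUID, as the change just below shows: even on a replayed delete, the saga's later nodes need both the record's size and its volume_id. A rough sketch of how the two downstream nodes consume that one output, mirroring the lookups used in the delete saga above:

    // Sketch only: both saga nodes read the same "deleted_disk" output.
    let disk = sagactx.lookup::<db::model::Disk>("deleted_disk")?;

    // disk-delete.account-space: hand the bytes back to the project.
    let bytes_freed = i64::try_from(disk.size.to_bytes()).map_err(|e| {
        Error::internal_error(&format!("updating resource usage: {e}"))
    })?;

    // disk-delete.delete-volume: tear down the backing volume.
    let volume_id = disk.volume_id;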
- return Ok(disk.volume_id); + return Ok(disk); } else if !ok_to_delete_states.contains(disk_state.state()) { return Err(Error::InvalidRequest { message: format!( diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index fe77f37836b..2fe13d67054 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -28,7 +28,7 @@ struct CollectionTarget { } #[derive(Debug, Clone, Metric)] -struct PhysicalDiskSpaceProvisioned { +struct VirtualDiskSpaceProvisioned { #[datum] bytes_used: i64, } @@ -61,8 +61,8 @@ impl Producer { .map(|usage| { Sample::new( &CollectionTarget { id: usage.id }, - &PhysicalDiskSpaceProvisioned { - bytes_used: usage.physical_disk_bytes_provisioned, + &VirtualDiskSpaceProvisioned { + bytes_used: usage.virtual_disk_bytes_provisioned, }, ) }) diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/resource_usage_update.rs index 7f931fa56d6..c5d82a1a7fa 100644 --- a/nexus/src/db/queries/resource_usage_update.rs +++ b/nexus/src/db/queries/resource_usage_update.rs @@ -187,8 +187,8 @@ impl ResourceUsageUpdate { use resource_usage::dsl; Self::apply_update( project_id, - dsl::physical_disk_bytes_provisioned - .eq(dsl::physical_disk_bytes_provisioned + disk_bytes_diff), + dsl::virtual_disk_bytes_provisioned + .eq(dsl::virtual_disk_bytes_provisioned + disk_bytes_diff), ) } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 8814583ac69..e400aafa7f0 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -4043,7 +4043,7 @@ pub struct ResourceUtilization { #[display(style = "snake_case")] #[serde(rename_all = "snake_case")] pub enum ResourceName { - PhysicalDiskSpaceProvisioned, + VirtualDiskSpaceProvisioned, CpusProvisioned, RamProvisioned, } @@ -4075,11 +4075,11 @@ async fn system_metrics_list( let opctx = OpContext::for_external_api(&rqctx).await?; let result = match resource_name { - ResourceName::PhysicalDiskSpaceProvisioned => { + ResourceName::VirtualDiskSpaceProvisioned => { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; nexus .select_timeseries( - "collection_target:physical_disk_space_provisioned", + "collection_target:virtual_disk_space_provisioned", &[&format!("id=={}", query.id)], query.pagination, limit, diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 8249f5e6c89..8a6685ed01c 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -920,19 +920,19 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { // The project and organization should start as empty. 
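Every layer of the hierarchy gets the same zero check; the renamed assertions below all expand one pattern, sketched here with the IDs created during test setup:

    // Compact form of the assertions that follow, assuming the setup's IDs.
    for id in [project_id1, project_id2, org_id, *SILO_ID, *FLEET_ID] {
        let usage = datastore.resource_usage_get(&opctx, id).await.unwrap();
        assert_eq!(usage.virtual_disk_bytes_provisioned, 0);
    }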
let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, *SILO_ID).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, *FLEET_ID).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); // Ask for a 1 gibibyte disk in the first project. // @@ -963,29 +963,29 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); let resource_usage = datastore.resource_usage_get(&opctx, *SILO_ID).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); let resource_usage = datastore.resource_usage_get(&opctx, *FLEET_ID).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); // Ask for a 1 gibibyte disk in the second project. 
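Before the second disk goes in below, note why every expectation in this test dropped a factor of three: the old physical accounting charged for all three Crucible regions backing a disk, while virtual accounting charges only the user-visible size. The arithmetic for this test's 1 gibibyte disk, as a sketch:

    let disk_size_bytes: i64 = 1 << 30; // the 1 GiB disk created above
    let region_count = 3; // each disk is backed by three replicated regions
    let physical_bytes_provisioned = region_count * disk_size_bytes; // old metric
    let virtual_bytes_provisioned = disk_size_bytes; // new metric
    assert_eq!(physical_bytes_provisioned, 3 * virtual_bytes_provisioned);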
@@ -1018,20 +1018,20 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 2 * 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + 2 * disk_size.to_bytes() as i64 ); // Delete the disk we just created, observe the utilization drop @@ -1045,17 +1045,17 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { let resource_usage = datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); let resource_usage = datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.physical_disk_bytes_provisioned, 0); + assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); let resource_usage = datastore.resource_usage_get(&opctx, org_id).await.unwrap(); assert_eq!( - resource_usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + resource_usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); } @@ -1380,7 +1380,7 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { // Check the utilization info for the whole project too. 
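The path below embeds an oximeter timeseries name: the target struct's snake_cased name joined to the metric struct's name with a colon. A small sketch of that convention, using the types from this series:

    // CollectionTarget + VirtualDiskSpaceProvisioned yield the name used below;
    // the CPU and RAM metrics follow the same target:metric pattern.
    fn timeseries_name(target: &str, metric: &str) -> String {
        format!("{target}:{metric}")
    }

    assert_eq!(
        timeseries_name("collection_target", "virtual_disk_space_provisioned"),
        "collection_target:virtual_disk_space_provisioned"
    );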
let utilization_url = |id: Uuid| { format!( - "/system/metrics/physical_disk_space_provisioned?start_time={:?}&end_time={:?}&id={:?}", + "/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}&id={:?}", Utc::now() - chrono::Duration::seconds(20), Utc::now() + chrono::Duration::seconds(20), id, diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index d77d5be406d..b96760c7d90 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -323,8 +323,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); assert_eq!( - usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); // Issue snapshot request @@ -350,8 +350,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { assert_eq!(snapshot.size, base_disk.size); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); assert_eq!( - usage.physical_disk_bytes_provisioned, - 2 * 3 * disk_size.to_bytes() as i64 + usage.virtual_disk_bytes_provisioned, + 2 * disk_size.to_bytes() as i64 ); // Create a disk from this snapshot @@ -382,8 +382,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); assert_eq!( - usage.physical_disk_bytes_provisioned, - 3 * 3 * disk_size.to_bytes() as i64 + usage.virtual_disk_bytes_provisioned, + 3 * disk_size.to_bytes() as i64 ); // Delete snapshot @@ -403,8 +403,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); assert_eq!( - usage.physical_disk_bytes_provisioned, - 2 * 3 * disk_size.to_bytes() as i64 + usage.virtual_disk_bytes_provisioned, + 2 * disk_size.to_bytes() as i64 ); // Delete the disk using the snapshot @@ -416,8 +416,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .expect("failed to delete disk"); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); assert_eq!( - usage.physical_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + usage.virtual_disk_bytes_provisioned, + disk_size.to_bytes() as i64 ); // Delete the original base disk @@ -428,7 +428,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to delete disk"); let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(usage.physical_disk_bytes_provisioned, 0); + assert_eq!(usage.virtual_disk_bytes_provisioned, 0); } // Test the various ways Nexus can reject a disk created from a snapshot diff --git a/openapi/nexus.json b/openapi/nexus.json index cf3fef5205c..2839fb44d6a 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -12290,7 +12290,7 @@ "ResourceName": { "type": "string", "enum": [ - "physical_disk_space_provisioned", + "virtual_disk_space_provisioned", "cpus_provisioned", "ram_provisioned" ] From 2b30f2b7667e2985f6b810257228c8188064fa28 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 17:08:06 -0400 Subject: [PATCH 20/80] fix endpoint --- nexus/tests/integration_tests/endpoints.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/tests/integration_tests/endpoints.rs 
b/nexus/tests/integration_tests/endpoints.rs index e1c1fb9285f..46a7412c617 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -389,7 +389,7 @@ lazy_static! { pub static ref DEMO_SYSTEM_METRICS_URL: String = format!( - "/system/metrics/physical_disk_space_provisioned?start_time={:?}&end_time={:?}&id={}", + "/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}&id={}", Utc::now(), Utc::now(), "3aaf22ae-5691-4f6d-b62c-aa532512fa78", From e0dd38553414bd346e37d110ac565faafe0aeb6d Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 20:31:48 -0400 Subject: [PATCH 21/80] Make 'test_disk_metrics' less flaky --- nexus/tests/integration_tests/disks.rs | 28 +++++++------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 8a6685ed01c..92d67515873 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -1387,32 +1387,18 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { ) }; - let get_i64 = |measurement: &oximeter::types::Measurement| -> i64 { - match measurement.datum() { - oximeter::types::Datum::I64(value) => *value, - _ => panic!("Unexpected datum type: {:?}", measurement.datum()), - } - }; - - // We should see two measurements: One when the project was created, and - // another once the disk modified the size. + // We should create measurements when the disk is created, and again when + // it's modified. However, due to our inability to control the sampling + // rate, we just keep polling until we see *something*. + // + // Normally we'll see two measurements, but it's possible to only see one + // if the producer interface is queried in between the two samples. 
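Put differently, the fix swaps an exact-count assertion for an eventually-consistent one. A minimal sketch of that polling pattern, assuming a tokio runtime and a caller-supplied fetch closure standing in for query_for_metrics_until_they_exist:

    use std::time::Duration;

    // Poll until at least one sample shows up, instead of asserting how many
    // samples a timing-dependent collector has produced so far.
    async fn wait_for_any_sample<F, Fut, T>(mut fetch: F) -> Vec<T>
    where
        F: FnMut() -> Fut,
        Fut: std::future::Future<Output = Vec<T>>,
    {
        loop {
            let items = fetch().await;
            if !items.is_empty() {
                return items;
            }
            tokio::time::sleep(Duration::from_millis(50)).await;
        }
    }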
let measurements = query_for_metrics_until_they_exist( client, &utilization_url(project_id), ) .await; - assert_eq!( - measurements.items.len(), - 2, - "Unexpected items: {:#?}", - measurements.items - ); - assert_eq!(get_i64(&measurements.items[0]), 0); - assert!( - get_i64(&measurements.items[1]) > 0, - "Unexpected items: {:#?}", - measurements.items - ); + assert!(!measurements.items.is_empty()); } #[nexus_test] From 613e820ece7244618fd3b8c167dd22059d27a8b1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 20:54:17 -0400 Subject: [PATCH 22/80] Add resource_type --- common/src/sql/dbinit.sql | 17 ++++++++--------- nexus/db-model/src/resource_usage.rs | 23 ++++++++++++++++++++++- nexus/db-model/src/schema.rs | 1 + nexus/src/db/datastore/organization.rs | 7 ++++++- nexus/src/db/datastore/project.rs | 8 ++++++-- nexus/src/db/datastore/silo.rs | 7 ++++--- nexus/src/populate.rs | 8 +++++++- 7 files changed, 54 insertions(+), 17 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 0b3db9aafed..08110f53849 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -139,17 +139,16 @@ CREATE INDEX ON omicron.public.service ( sled_id ); -/* - * A table describing resource usage which may be associated - * with a collection of objects, including: - * - Projects - * - Organizations - * - Silos - * - Fleet - */ +-- A table describing resource usage which may be associated +-- with a collection of objects, including: +-- - Projects +-- - Organizations +-- - Silos +-- - Fleet CREATE TABLE omicron.public.resource_usage ( - /* Should match the UUID of the corresponding collection */ + -- Should match the UUID of the corresponding collection id UUID PRIMARY KEY, + resource_type STRING(63) NOT NULL, -- The amount of physical disk space which has been provisioned -- on behalf of the collection. diff --git a/nexus/db-model/src/resource_usage.rs b/nexus/db-model/src/resource_usage.rs index 74d7836a525..b07ba7a5c36 100644 --- a/nexus/db-model/src/resource_usage.rs +++ b/nexus/db-model/src/resource_usage.rs @@ -5,11 +5,31 @@ use crate::schema::resource_usage; use uuid::Uuid; +#[derive(Debug)] +pub enum CollectionType { + Project, + Organization, + Silo, + Fleet, +} + +impl std::fmt::Display for CollectionType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + CollectionType::Project => write!(f, "project"), + CollectionType::Organization => write!(f, "organization"), + CollectionType::Silo => write!(f, "silo"), + CollectionType::Fleet => write!(f, "fleet"), + } + } +} + /// Describes resource_usage for a collection #[derive(Selectable, Queryable, Insertable, Debug)] #[diesel(table_name = resource_usage)] pub struct ResourceUsage { pub id: Uuid, + pub resource_type: String, pub virtual_disk_bytes_provisioned: i64, pub cpus_provisioned: i64, @@ -17,9 +37,10 @@ pub struct ResourceUsage { } impl ResourceUsage { - pub fn new(id: Uuid) -> Self { + pub fn new(id: Uuid, resource_type: CollectionType) -> Self { Self { id, + resource_type: resource_type.to_string(), virtual_disk_bytes_provisioned: 0, cpus_provisioned: 0, ram_provisioned: 0, diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 85a70780aea..033cff9ed1e 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -410,6 +410,7 @@ table! { table! 
{ resource_usage { id -> Uuid, + resource_type -> Text, virtual_disk_bytes_provisioned -> Int8, cpus_provisioned -> Int8, ram_provisioned -> Int8, diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index d16fec24133..bae95977a2b 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -14,6 +14,7 @@ use crate::db::error::diesel_pool_result_optional; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; +use crate::db::model::CollectionType; use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::OrganizationUpdate; @@ -79,7 +80,11 @@ impl DataStore { // NOTE: if you do this before the org is created, it'll exist as // soon as the org does. However, that'll work better in a saga/CTE when // unwinding is built-in more naturally. - self.resource_usage_create(opctx, ResourceUsage::new(org.id())).await?; + self.resource_usage_create( + opctx, + ResourceUsage::new(org.id(), CollectionType::Organization), + ) + .await?; Ok(org) } diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index caf2c073f82..0c89f423e4f 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -13,6 +13,7 @@ use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; +use crate::db::model::CollectionType; use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::Project; @@ -70,8 +71,11 @@ impl DataStore { // NOTE: if you do this before the project is created, it'll exist as // soon as the project does. However, that'll work better in a saga/CTE when // unwinding is built-in more naturally. 
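The change below tags each new usage row with the kind of collection that owns it, so one table can carry project, organization, silo, and fleet counters side by side. A short sketch of the constructor this patch introduces, assuming a project_id: Uuid is in scope:

    // CollectionType's Display impl is what lands in the new resource_type column.
    let usage = ResourceUsage::new(project_id, CollectionType::Project);
    assert_eq!(usage.resource_type, "project");
    assert_eq!(usage.cpus_provisioned, 0); // counters still start at zero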
- self.resource_usage_create(opctx, ResourceUsage::new(project.id())) - .await?; + self.resource_usage_create( + opctx, + ResourceUsage::new(project.id(), CollectionType::Project), + ) + .await?; Ok(project) } diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 93e939f9180..673af67036b 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -15,6 +15,7 @@ use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::silo::DEFAULT_SILO; use crate::db::identity::Resource; +use crate::db::model::CollectionType; use crate::db::model::Name; use crate::db::model::ResourceUsage; use crate::db::model::Silo; @@ -57,7 +58,7 @@ impl DataStore { self.resource_usage_create( opctx, - ResourceUsage::new(DEFAULT_SILO.id()), + ResourceUsage::new(DEFAULT_SILO.id(), CollectionType::Silo), ) .await?; @@ -148,13 +149,13 @@ impl DataStore { let silo = silo_create_query.get_result_async(&conn).await?; use db::schema::resource_usage::dsl; diesel::insert_into(dsl::resource_usage) - .values(ResourceUsage::new(silo.id())) + .values(ResourceUsage::new(silo.id(), CollectionType::Silo)) .execute_async(&conn) .await?; self.resource_usage_create_on_connection( &conn, - ResourceUsage::new(DEFAULT_SILO.id()), + ResourceUsage::new(DEFAULT_SILO.id(), CollectionType::Silo), ) .await?; diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 40362250336..3723d02f7d3 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -282,7 +282,13 @@ impl Populator for PopulateFleet { let id = *db::fixed_data::FLEET_ID; datastore.fleet_insert(opctx, &db::model::Fleet::new(id)).await?; datastore - .resource_usage_create(opctx, db::model::ResourceUsage::new(id)) + .resource_usage_create( + opctx, + db::model::ResourceUsage::new( + id, + db::model::CollectionType::Fleet, + ), + ) .await?; Ok(()) From 0036869c754b7ea643c086ba5dcec8fee399b89e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 21:15:36 -0400 Subject: [PATCH 23/80] Safer conversions, less unwrap --- nexus/src/app/disk.rs | 6 +++++- nexus/src/app/instance.rs | 6 +++++- nexus/src/app/sagas/disk_create.rs | 12 ++++++++++-- nexus/src/app/sagas/disk_delete.rs | 17 +++++++++++++++-- nexus/src/app/sagas/instance_create.rs | 16 ++++++++++++++-- nexus/src/app/sagas/snapshot_create.rs | 16 ++++++++++++++-- 6 files changed, 63 insertions(+), 10 deletions(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index a672f5f7837..fd86ea1a234 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -516,7 +516,11 @@ impl super::Nexus { .resource_usage_update_disk( &opctx, project.id(), - -i64::try_from(db_snapshot.size.to_bytes()).unwrap(), + -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + })?, ) .await?; diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 888924aadeb..4c65606f687 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -256,7 +256,11 @@ impl super::Nexus { project.id(), -i64::from(instance.runtime_state.ncpus.0 .0), -i64::try_from(instance.runtime_state.memory.to_bytes()) - .unwrap(), + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + })?, ) .await?; self.db_datastore diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index a3044ca083c..f883516f324 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ 
-288,7 +288,13 @@ async fn sdc_account_space( .resource_usage_update_disk( &opctx, params.project_id, - i64::try_from(disk_created.size.to_bytes()).unwrap(), + i64::try_from(disk_created.size.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; @@ -309,7 +315,9 @@ async fn sdc_account_space_undo( .resource_usage_update_disk( &opctx, params.project_id, - -i64::try_from(disk_created.size.to_bytes()).unwrap(), + -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { + Error::internal_error(&format!("updating resource usage: {e}")) + })?, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 5c7fb4fc7bf..0d176921f8b 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -10,6 +10,7 @@ use crate::authn; use crate::context::OpContext; use crate::db; use lazy_static::lazy_static; +use omicron_common::api::external::Error; use serde::Deserialize; use serde::Serialize; use std::sync::Arc; @@ -116,7 +117,13 @@ async fn sdd_account_space( .resource_usage_update_disk( &opctx, params.project_id, - -i64::try_from(deleted_disk.size.to_bytes()).unwrap(), + -i64::try_from(deleted_disk.size.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; @@ -137,7 +144,13 @@ async fn sdd_account_space_undo( .resource_usage_update_disk( &opctx, params.project_id, - i64::try_from(deleted_disk.size.to_bytes()).unwrap(), + i64::try_from(deleted_disk.size.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 00140cd07a4..cf8a22fda3d 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -866,7 +866,13 @@ async fn sic_account_resources( &opctx, params.project_id, i64::from(params.create_params.ncpus.0), - i64::try_from(params.create_params.memory.to_bytes()).unwrap(), + i64::try_from(params.create_params.memory.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; @@ -887,7 +893,13 @@ async fn sic_account_resources_undo( &opctx, params.project_id, -i64::from(params.create_params.ncpus.0), - -i64::try_from(params.create_params.memory.to_bytes()).unwrap(), + -i64::try_from(params.create_params.memory.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index d405b48d7db..60973be5728 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -558,7 +558,13 @@ async fn ssc_account_space( .resource_usage_update_disk( &opctx, params.project_id, - i64::try_from(snapshot_created.size.to_bytes()).unwrap(), + i64::try_from(snapshot_created.size.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) 
+ }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; @@ -580,7 +586,13 @@ async fn ssc_account_space_undo( .resource_usage_update_disk( &opctx, params.project_id, - -i64::try_from(snapshot_created.size.to_bytes()).unwrap(), + -i64::try_from(snapshot_created.size.to_bytes()) + .map_err(|e| { + Error::internal_error(&format!( + "updating resource usage: {e}" + )) + }) + .map_err(ActionError::action_failed)?, ) .await .map_err(ActionError::action_failed)?; From f3e9d7b504cae322801f3e6f430c98bc2962a5c9 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 21:41:57 -0400 Subject: [PATCH 24/80] Use authz::project instead of project_id --- nexus/authz-macros/src/lib.rs | 2 +- nexus/db-model/src/update_artifact.rs | 13 +++++++++++-- nexus/src/app/disk.rs | 15 ++++++++------- nexus/src/app/sagas/disk_create.rs | 13 +++++-------- nexus/src/app/sagas/disk_delete.rs | 9 +++++---- nexus/src/app/sagas/snapshot_create.rs | 15 ++++++--------- nexus/src/app/volume.rs | 5 +++-- nexus/src/authz/api_resources.rs | 2 +- 8 files changed, 40 insertions(+), 34 deletions(-) diff --git a/nexus/authz-macros/src/lib.rs b/nexus/authz-macros/src/lib.rs index ed5b7f6e2d2..37b5abccc18 100644 --- a/nexus/authz-macros/src/lib.rs +++ b/nexus/authz-macros/src/lib.rs @@ -278,7 +278,7 @@ fn do_authz_resource( Ok(quote! { #[doc = #doc_struct] - #[derive(Clone, Debug)] + #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct #resource_name { parent: #parent_resource_name, key: #primary_key_type, diff --git a/nexus/db-model/src/update_artifact.rs b/nexus/db-model/src/update_artifact.rs index 13615e045ec..37ce68d4c11 100644 --- a/nexus/db-model/src/update_artifact.rs +++ b/nexus/db-model/src/update_artifact.rs @@ -7,6 +7,7 @@ use crate::schema::update_available_artifact; use chrono::{DateTime, Utc}; use omicron_common::api::internal; use parse_display::Display; +use serde::{Deserialize, Serialize}; use std::io::Write; impl_enum_wrapper!( @@ -14,7 +15,7 @@ impl_enum_wrapper!( #[diesel(postgres_type(name = "update_artifact_kind"))] pub struct UpdateArtifactKindEnum; - #[derive(Clone, Copy, Debug, Display, AsExpression, FromSqlRow, PartialEq, Eq)] + #[derive(Clone, Copy, Debug, Display, AsExpression, FromSqlRow, PartialEq, Eq, Serialize, Deserialize)] #[display("{0}")] #[diesel(sql_type = UpdateArtifactKindEnum)] pub struct UpdateArtifactKind(pub internal::nexus::UpdateArtifactKind); @@ -24,7 +25,15 @@ impl_enum_wrapper!( ); #[derive( - Queryable, Insertable, Clone, Debug, Display, Selectable, AsChangeset, + Queryable, + Insertable, + Clone, + Debug, + Display, + Selectable, + AsChangeset, + Serialize, + Deserialize, )] #[diesel(table_name = update_available_artifact)] #[display("{kind} \"{name}\" v{version}")] diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index fd86ea1a234..e4234a71fc5 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -212,7 +212,7 @@ impl super::Nexus { let saga_params = sagas::disk_create::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project_id: authz_project.id(), + project: authz_project, create_params: params.clone(), }; let saga_outputs = self @@ -387,7 +387,7 @@ impl super::Nexus { let saga_params = sagas::disk_delete::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project_id: project.id(), + project, disk_id: authz_disk.id(), }; self.execute_saga::(saga_params) @@ -420,7 +420,7 @@ impl super::Nexus { let saga_params = sagas::snapshot_create::Params { 
serialized_authn: authn::saga::Serialized::for_opctx(opctx), silo_id: authz_silo.id(), - project_id: authz_project.id(), + project: authz_project, disk_id: authz_disk.id(), create_params: params.clone(), }; @@ -503,7 +503,7 @@ impl super::Nexus { // reference counting for volumes, and probably means this needs to // instead be a saga. - let (.., project, authz_snapshot, db_snapshot) = + let (.., authz_project, authz_snapshot, db_snapshot) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -515,7 +515,7 @@ impl super::Nexus { self.db_datastore .resource_usage_update_disk( &opctx, - project.id(), + authz_project.id(), -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { Error::internal_error(&format!( "updating resource usage: {e}" @@ -529,9 +529,10 @@ impl super::Nexus { .await?; // Kick off volume deletion saga(s) - self.volume_delete(opctx, project.id(), db_snapshot.volume_id).await?; + self.volume_delete(opctx, &authz_project, db_snapshot.volume_id) + .await?; if let Some(volume_id) = db_snapshot.destination_volume_id { - self.volume_delete(opctx, project.id(), volume_id).await?; + self.volume_delete(opctx, &authz_project, volume_id).await?; } Ok(()) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index f883516f324..71d1862ce09 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -30,7 +30,7 @@ use uuid::Uuid; #[derive(Debug, Deserialize, Serialize)] pub struct Params { pub serialized_authn: authn::saga::Serialized, - pub project_id: Uuid, + pub project: authz::Project, pub create_params: params::DiskCreate, } @@ -196,7 +196,7 @@ async fn sdc_create_disk_record( let disk = db::model::Disk::new( disk_id, - params.project_id, + params.project.id(), volume_id, params.create_params.clone(), block_size, @@ -287,7 +287,7 @@ async fn sdc_account_space( .datastore() .resource_usage_update_disk( &opctx, - params.project_id, + params.project.id(), i64::try_from(disk_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -314,7 +314,7 @@ async fn sdc_account_space_undo( .datastore() .resource_usage_update_disk( &opctx, - params.project_id, + params.project.id(), -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { Error::internal_error(&format!("updating resource usage: {e}")) })?, @@ -541,10 +541,7 @@ async fn sdc_create_volume_record_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); let volume_id = sagactx.lookup::("volume_id")?; - osagactx - .nexus() - .volume_delete(&opctx, params.project_id, volume_id) - .await?; + osagactx.nexus().volume_delete(&opctx, ¶ms.project, volume_id).await?; Ok(()) } diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 0d176921f8b..c3c7c7815c3 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -7,6 +7,7 @@ use super::NexusActionContext; use super::NexusSaga; use crate::app::sagas::NexusAction; use crate::authn; +use crate::authz; use crate::context::OpContext; use crate::db; use lazy_static::lazy_static; @@ -25,7 +26,7 @@ use uuid::Uuid; #[derive(Debug, Deserialize, Serialize)] pub struct Params { pub serialized_authn: authn::saga::Serialized, - pub project_id: Uuid, + pub project: authz::Project, pub disk_id: Uuid, } @@ -116,7 +117,7 @@ async fn sdd_account_space( .datastore() .resource_usage_update_disk( &opctx, - params.project_id, + params.project.id(), 
-i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -143,7 +144,7 @@ async fn sdd_account_space_undo( .datastore() .resource_usage_update_disk( &opctx, - params.project_id, + params.project.id(), i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -167,7 +168,7 @@ async fn sdd_delete_volume( sagactx.lookup::("deleted_disk")?.volume_id; osagactx .nexus() - .volume_delete(&opctx, params.project_id, volume_id) + .volume_delete(&opctx, ¶ms.project, volume_id) .await .map_err(ActionError::action_failed)?; Ok(()) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 60973be5728..fb549e27688 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -118,7 +118,7 @@ use uuid::Uuid; pub struct Params { pub serialized_authn: authn::saga::Serialized, pub silo_id: Uuid, - pub project_id: Uuid, + pub project: authz::Project, pub disk_id: Uuid, pub create_params: params::SnapshotCreate, } @@ -450,7 +450,7 @@ async fn ssc_create_destination_volume_record_undo( sagactx.lookup::("destination_volume_id")?; osagactx .nexus() - .volume_delete(&opctx, params.project_id, destination_volume_id) + .volume_delete(&opctx, ¶ms.project, destination_volume_id) .await?; Ok(()) @@ -489,7 +489,7 @@ async fn ssc_create_snapshot_record( params.create_params.identity.clone(), ), - project_id: params.project_id, + project_id: params.project.id(), disk_id: disk.id(), volume_id, destination_volume_id: Some(destination_volume_id), @@ -557,7 +557,7 @@ async fn ssc_account_space( .datastore() .resource_usage_update_disk( &opctx, - params.project_id, + params.project.id(), i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -585,7 +585,7 @@ async fn ssc_account_space_undo( .datastore() .resource_usage_update_disk( &opctx, - params.project_id, + params.project.id(), -i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -878,10 +878,7 @@ async fn ssc_create_volume_record_undo( let volume_id = sagactx.lookup::("volume_id")?; info!(log, "deleting volume {}", volume_id); - osagactx - .nexus() - .volume_delete(&opctx, params.project_id, volume_id) - .await?; + osagactx.nexus().volume_delete(&opctx, ¶ms.project, volume_id).await?; Ok(()) } diff --git a/nexus/src/app/volume.rs b/nexus/src/app/volume.rs index 714604bcb13..bf6ecf036d4 100644 --- a/nexus/src/app/volume.rs +++ b/nexus/src/app/volume.rs @@ -6,6 +6,7 @@ use crate::app::sagas; use crate::authn; +use crate::authz; use crate::context::OpContext; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -27,12 +28,12 @@ impl super::Nexus { pub async fn volume_delete( self: &Arc, opctx: &OpContext, - project_id: Uuid, + project: &authz::Project, volume_id: Uuid, ) -> DeleteResult { let saga_params = sagas::volume_delete::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project_id, + project_id: project.id(), volume_id, }; diff --git a/nexus/src/authz/api_resources.rs b/nexus/src/authz/api_resources.rs index ff8f166871f..e8672e626da 100644 --- a/nexus/src/authz/api_resources.rs +++ b/nexus/src/authz/api_resources.rs @@ -150,7 +150,7 @@ impl AuthorizedResource for T { /// This object is used for authorization checks on a Fleet by passing it as the /// `resource` argument to [`crate::context::OpContext::authorize()`]. 
You /// don't construct a `Fleet` yourself -- use the global [`FLEET`]. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub struct Fleet; /// Singleton representing the [`Fleet`] itself for authz purposes pub const FLEET: Fleet = Fleet; From 4c237e18478a095f9ecf441f2d9a050d25020ad1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 21:47:38 -0400 Subject: [PATCH 25/80] Revert "Use authz::project instead of project_id" This reverts commit f3e9d7b504cae322801f3e6f430c98bc2962a5c9. --- nexus/authz-macros/src/lib.rs | 2 +- nexus/db-model/src/update_artifact.rs | 13 ++----------- nexus/src/app/disk.rs | 15 +++++++-------- nexus/src/app/sagas/disk_create.rs | 13 ++++++++----- nexus/src/app/sagas/disk_delete.rs | 9 ++++----- nexus/src/app/sagas/snapshot_create.rs | 15 +++++++++------ nexus/src/app/volume.rs | 5 ++--- nexus/src/authz/api_resources.rs | 2 +- 8 files changed, 34 insertions(+), 40 deletions(-) diff --git a/nexus/authz-macros/src/lib.rs b/nexus/authz-macros/src/lib.rs index 37b5abccc18..ed5b7f6e2d2 100644 --- a/nexus/authz-macros/src/lib.rs +++ b/nexus/authz-macros/src/lib.rs @@ -278,7 +278,7 @@ fn do_authz_resource( Ok(quote! { #[doc = #doc_struct] - #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] + #[derive(Clone, Debug)] pub struct #resource_name { parent: #parent_resource_name, key: #primary_key_type, diff --git a/nexus/db-model/src/update_artifact.rs b/nexus/db-model/src/update_artifact.rs index 37ce68d4c11..13615e045ec 100644 --- a/nexus/db-model/src/update_artifact.rs +++ b/nexus/db-model/src/update_artifact.rs @@ -7,7 +7,6 @@ use crate::schema::update_available_artifact; use chrono::{DateTime, Utc}; use omicron_common::api::internal; use parse_display::Display; -use serde::{Deserialize, Serialize}; use std::io::Write; impl_enum_wrapper!( @@ -15,7 +14,7 @@ impl_enum_wrapper!( #[diesel(postgres_type(name = "update_artifact_kind"))] pub struct UpdateArtifactKindEnum; - #[derive(Clone, Copy, Debug, Display, AsExpression, FromSqlRow, PartialEq, Eq, Serialize, Deserialize)] + #[derive(Clone, Copy, Debug, Display, AsExpression, FromSqlRow, PartialEq, Eq)] #[display("{0}")] #[diesel(sql_type = UpdateArtifactKindEnum)] pub struct UpdateArtifactKind(pub internal::nexus::UpdateArtifactKind); @@ -25,15 +24,7 @@ impl_enum_wrapper!( ); #[derive( - Queryable, - Insertable, - Clone, - Debug, - Display, - Selectable, - AsChangeset, - Serialize, - Deserialize, + Queryable, Insertable, Clone, Debug, Display, Selectable, AsChangeset, )] #[diesel(table_name = update_available_artifact)] #[display("{kind} \"{name}\" v{version}")] diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index e4234a71fc5..fd86ea1a234 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -212,7 +212,7 @@ impl super::Nexus { let saga_params = sagas::disk_create::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project: authz_project, + project_id: authz_project.id(), create_params: params.clone(), }; let saga_outputs = self @@ -387,7 +387,7 @@ impl super::Nexus { let saga_params = sagas::disk_delete::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project, + project_id: project.id(), disk_id: authz_disk.id(), }; self.execute_saga::(saga_params) @@ -420,7 +420,7 @@ impl super::Nexus { let saga_params = sagas::snapshot_create::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), silo_id: authz_silo.id(), - project: authz_project, + project_id: authz_project.id(), disk_id: 
authz_disk.id(), create_params: params.clone(), }; @@ -503,7 +503,7 @@ impl super::Nexus { // reference counting for volumes, and probably means this needs to // instead be a saga. - let (.., authz_project, authz_snapshot, db_snapshot) = + let (.., project, authz_snapshot, db_snapshot) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -515,7 +515,7 @@ impl super::Nexus { self.db_datastore .resource_usage_update_disk( &opctx, - authz_project.id(), + project.id(), -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { Error::internal_error(&format!( "updating resource usage: {e}" @@ -529,10 +529,9 @@ impl super::Nexus { .await?; // Kick off volume deletion saga(s) - self.volume_delete(opctx, &authz_project, db_snapshot.volume_id) - .await?; + self.volume_delete(opctx, project.id(), db_snapshot.volume_id).await?; if let Some(volume_id) = db_snapshot.destination_volume_id { - self.volume_delete(opctx, &authz_project, volume_id).await?; + self.volume_delete(opctx, project.id(), volume_id).await?; } Ok(()) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 71d1862ce09..f883516f324 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -30,7 +30,7 @@ use uuid::Uuid; #[derive(Debug, Deserialize, Serialize)] pub struct Params { pub serialized_authn: authn::saga::Serialized, - pub project: authz::Project, + pub project_id: Uuid, pub create_params: params::DiskCreate, } @@ -196,7 +196,7 @@ async fn sdc_create_disk_record( let disk = db::model::Disk::new( disk_id, - params.project.id(), + params.project_id, volume_id, params.create_params.clone(), block_size, @@ -287,7 +287,7 @@ async fn sdc_account_space( .datastore() .resource_usage_update_disk( &opctx, - params.project.id(), + params.project_id, i64::try_from(disk_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -314,7 +314,7 @@ async fn sdc_account_space_undo( .datastore() .resource_usage_update_disk( &opctx, - params.project.id(), + params.project_id, -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { Error::internal_error(&format!("updating resource usage: {e}")) })?, @@ -541,7 +541,10 @@ async fn sdc_create_volume_record_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); let volume_id = sagactx.lookup::("volume_id")?; - osagactx.nexus().volume_delete(&opctx, ¶ms.project, volume_id).await?; + osagactx + .nexus() + .volume_delete(&opctx, params.project_id, volume_id) + .await?; Ok(()) } diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index c3c7c7815c3..0d176921f8b 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -7,7 +7,6 @@ use super::NexusActionContext; use super::NexusSaga; use crate::app::sagas::NexusAction; use crate::authn; -use crate::authz; use crate::context::OpContext; use crate::db; use lazy_static::lazy_static; @@ -26,7 +25,7 @@ use uuid::Uuid; #[derive(Debug, Deserialize, Serialize)] pub struct Params { pub serialized_authn: authn::saga::Serialized, - pub project: authz::Project, + pub project_id: Uuid, pub disk_id: Uuid, } @@ -117,7 +116,7 @@ async fn sdd_account_space( .datastore() .resource_usage_update_disk( &opctx, - params.project.id(), + params.project_id, -i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -144,7 +143,7 @@ async fn sdd_account_space_undo( .datastore() .resource_usage_update_disk( &opctx, 
- params.project.id(), + params.project_id, i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -168,7 +167,7 @@ async fn sdd_delete_volume( sagactx.lookup::("deleted_disk")?.volume_id; osagactx .nexus() - .volume_delete(&opctx, ¶ms.project, volume_id) + .volume_delete(&opctx, params.project_id, volume_id) .await .map_err(ActionError::action_failed)?; Ok(()) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index fb549e27688..60973be5728 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -118,7 +118,7 @@ use uuid::Uuid; pub struct Params { pub serialized_authn: authn::saga::Serialized, pub silo_id: Uuid, - pub project: authz::Project, + pub project_id: Uuid, pub disk_id: Uuid, pub create_params: params::SnapshotCreate, } @@ -450,7 +450,7 @@ async fn ssc_create_destination_volume_record_undo( sagactx.lookup::("destination_volume_id")?; osagactx .nexus() - .volume_delete(&opctx, ¶ms.project, destination_volume_id) + .volume_delete(&opctx, params.project_id, destination_volume_id) .await?; Ok(()) @@ -489,7 +489,7 @@ async fn ssc_create_snapshot_record( params.create_params.identity.clone(), ), - project_id: params.project.id(), + project_id: params.project_id, disk_id: disk.id(), volume_id, destination_volume_id: Some(destination_volume_id), @@ -557,7 +557,7 @@ async fn ssc_account_space( .datastore() .resource_usage_update_disk( &opctx, - params.project.id(), + params.project_id, i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -585,7 +585,7 @@ async fn ssc_account_space_undo( .datastore() .resource_usage_update_disk( &opctx, - params.project.id(), + params.project_id, -i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( @@ -878,7 +878,10 @@ async fn ssc_create_volume_record_undo( let volume_id = sagactx.lookup::("volume_id")?; info!(log, "deleting volume {}", volume_id); - osagactx.nexus().volume_delete(&opctx, ¶ms.project, volume_id).await?; + osagactx + .nexus() + .volume_delete(&opctx, params.project_id, volume_id) + .await?; Ok(()) } diff --git a/nexus/src/app/volume.rs b/nexus/src/app/volume.rs index bf6ecf036d4..714604bcb13 100644 --- a/nexus/src/app/volume.rs +++ b/nexus/src/app/volume.rs @@ -6,7 +6,6 @@ use crate::app::sagas; use crate::authn; -use crate::authz; use crate::context::OpContext; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -28,12 +27,12 @@ impl super::Nexus { pub async fn volume_delete( self: &Arc, opctx: &OpContext, - project: &authz::Project, + project_id: Uuid, volume_id: Uuid, ) -> DeleteResult { let saga_params = sagas::volume_delete::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project_id: project.id(), + project_id, volume_id, }; diff --git a/nexus/src/authz/api_resources.rs b/nexus/src/authz/api_resources.rs index e8672e626da..ff8f166871f 100644 --- a/nexus/src/authz/api_resources.rs +++ b/nexus/src/authz/api_resources.rs @@ -150,7 +150,7 @@ impl AuthorizedResource for T { /// This object is used for authorization checks on a Fleet by passing it as the /// `resource` argument to [`crate::context::OpContext::authorize()`]. You /// don't construct a `Fleet` yourself -- use the global [`FLEET`]. 
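(Aside, not part of the patches: the serde derives added in this patch and reverted in the next follow from how sagas persist their inputs. A saga's `Params` struct is stored serialized, so every field must implement `Serialize`/`Deserialize`; carrying a full `authz::Project` rather than a bare `Uuid` pulls the whole authz parent chain, up through `Fleet`, into serde. A minimal sketch of the pressure point, assuming the omicron crate context and mirroring the `Params` shape shown in these hunks:)

    #[derive(Debug, serde::Deserialize, serde::Serialize)]
    pub struct Params {
        pub serialized_authn: authn::saga::Serialized,
        // This field is what forces authz::Project -- and transitively
        // authz::Organization, authz::Silo, and authz::Fleet -- to derive
        // Serialize/Deserialize, hence the authz-macros change.
        pub project: authz::Project,
        pub disk_id: Uuid,
    }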
-#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug)] pub struct Fleet; /// Singleton representing the [`Fleet`] itself for authz purposes pub const FLEET: Fleet = Fleet; From 9e3e6ead5982b5e7f3dea30006eacc2a55b798fa Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 21:51:15 -0400 Subject: [PATCH 26/80] No project_id to volume_delete --- nexus/src/app/disk.rs | 4 ++-- nexus/src/app/sagas/disk_create.rs | 5 +---- nexus/src/app/sagas/disk_delete.rs | 2 +- nexus/src/app/sagas/snapshot_create.rs | 10 ++-------- nexus/src/app/sagas/volume_delete.rs | 1 - nexus/src/app/volume.rs | 2 -- 6 files changed, 6 insertions(+), 18 deletions(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index fd86ea1a234..7089cb0102f 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -529,9 +529,9 @@ impl super::Nexus { .await?; // Kick off volume deletion saga(s) - self.volume_delete(opctx, project.id(), db_snapshot.volume_id).await?; + self.volume_delete(opctx, db_snapshot.volume_id).await?; if let Some(volume_id) = db_snapshot.destination_volume_id { - self.volume_delete(opctx, project.id(), volume_id).await?; + self.volume_delete(opctx, volume_id).await?; } Ok(()) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index f883516f324..994a23bf351 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -541,10 +541,7 @@ async fn sdc_create_volume_record_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); let volume_id = sagactx.lookup::("volume_id")?; - osagactx - .nexus() - .volume_delete(&opctx, params.project_id, volume_id) - .await?; + osagactx.nexus().volume_delete(&opctx, volume_id).await?; Ok(()) } diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 0d176921f8b..9bf5c4be8ae 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -167,7 +167,7 @@ async fn sdd_delete_volume( sagactx.lookup::("deleted_disk")?.volume_id; osagactx .nexus() - .volume_delete(&opctx, params.project_id, volume_id) + .volume_delete(&opctx, volume_id) .await .map_err(ActionError::action_failed)?; Ok(()) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 60973be5728..ad4c7c67c0b 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -448,10 +448,7 @@ async fn ssc_create_destination_volume_record_undo( let destination_volume_id = sagactx.lookup::("destination_volume_id")?; - osagactx - .nexus() - .volume_delete(&opctx, params.project_id, destination_volume_id) - .await?; + osagactx.nexus().volume_delete(&opctx, destination_volume_id).await?; Ok(()) } @@ -878,10 +875,7 @@ async fn ssc_create_volume_record_undo( let volume_id = sagactx.lookup::("volume_id")?; info!(log, "deleting volume {}", volume_id); - osagactx - .nexus() - .volume_delete(&opctx, params.project_id, volume_id) - .await?; + osagactx.nexus().volume_delete(&opctx, volume_id).await?; Ok(()) } diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index 15044876946..4d99bbc2608 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -46,7 +46,6 @@ use uuid::Uuid; #[derive(Debug, Deserialize, Serialize)] pub struct Params { pub serialized_authn: authn::saga::Serialized, - pub project_id: Uuid, pub volume_id: Uuid, } // volume delete saga: actions diff --git 
a/nexus/src/app/volume.rs b/nexus/src/app/volume.rs index 714604bcb13..b81353b41e0 100644 --- a/nexus/src/app/volume.rs +++ b/nexus/src/app/volume.rs @@ -27,12 +27,10 @@ impl super::Nexus { pub async fn volume_delete( self: &Arc, opctx: &OpContext, - project_id: Uuid, volume_id: Uuid, ) -> DeleteResult { let saga_params = sagas::volume_delete::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), - project_id, volume_id, }; From c0b7d851122efe448b2af086fa0de4f4a4880bf2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 22:07:46 -0400 Subject: [PATCH 27/80] Add test for idempotent fleet initialization --- nexus/src/db/datastore/mod.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index e50e7c7ac44..be080865335 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -251,6 +251,7 @@ mod test { use crate::db::model::BlockSize; use crate::db::model::Dataset; use crate::db::model::ExternalIp; + use crate::db::model::Fleet; use crate::db::model::Rack; use crate::db::model::Region; use crate::db::model::Service; @@ -1047,6 +1048,26 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_fleet_initialize_is_idempotent() { + let logctx = dev::test_setup_log("test_fleet_initialize_is_idempotent"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a Fleet, insert it into the DB. + let fleet = Fleet::new(Uuid::new_v4()); + let result = datastore.fleet_insert(&opctx, &fleet).await.unwrap(); + assert_eq!(result.id(), fleet.id()); + + // Re-insert the Fleet (check for idempotency). + let result2 = datastore.fleet_insert(&opctx, &fleet).await.unwrap(); + assert_eq!(result2.id(), fleet.id()); + assert_eq!(result2.time_modified(), result.time_modified()); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_table_scan() { let logctx = dev::test_setup_log("test_table_scan"); From 83d8d9fc97ff8c8991e0e7f1fde898efe5e54506 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 22:53:41 -0400 Subject: [PATCH 28/80] More transactions --- common/src/sql/dbinit.sql | 3 +- nexus/src/db/datastore/organization.rs | 78 +++++++++++++++----------- nexus/src/db/datastore/project.rs | 75 +++++++++++++++---------- 3 files changed, 91 insertions(+), 65 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 08110f53849..679f8ce46b8 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -146,8 +146,9 @@ CREATE INDEX ON omicron.public.service ( -- - Silos -- - Fleet CREATE TABLE omicron.public.resource_usage ( - -- Should match the UUID of the corresponding collection + -- Should match the UUID of the corresponding collection. id UUID PRIMARY KEY, + -- Identifies the type of the collection. 
resource_type STRING(63) NOT NULL, -- The amount of physical disk space which has been provisioned diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index bae95977a2b..97a9349d47e 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -13,6 +13,7 @@ use crate::db::collection_insert::DatastoreCollection; use crate::db::error::diesel_pool_result_optional; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; +use crate::db::error::TransactionError; use crate::db::identity::Resource; use crate::db::model::CollectionType; use crate::db::model::Name; @@ -22,7 +23,7 @@ use crate::db::model::ResourceUsage; use crate::db::model::Silo; use crate::db::pagination::paginated; use crate::external_api::params; -use async_bb8_diesel::AsyncRunQueryDsl; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::CreateResult; @@ -53,40 +54,51 @@ impl DataStore { let organization = Organization::new(organization.clone(), silo_id); let name = organization.name().as_str().to_string(); - let org = Silo::insert_resource( - silo_id, - diesel::insert_into(dsl::organization).values(organization), - ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => Error::InternalError { - internal_message: format!( - "attempting to create an \ - organization under non-existent silo {}", - silo_id - ), - }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Organization, &name), + self.pool_authorized(opctx) + .await? + .transaction_async(|conn| async move { + let org = Silo::insert_resource( + silo_id, + diesel::insert_into(dsl::organization).values(organization), ) - } - })?; - - // Create resource usage for the org. - // - // NOTE: if you do this before the org is created, it'll exist as - // soon as the org does. However, that'll work better in a saga/CTE when - // unwinding is built-in more naturally. 
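(Aside, not part of the patches: the transactional shape these hunks introduce is easier to see condensed. Both the collection row and its usage row are written on one connection inside `transaction_async`, so they commit or roll back together. A sketch assuming the omicron datastore context, with the insert-error handling simplified relative to the real code:)

    self.pool_authorized(opctx)
        .await?
        .transaction_async(|conn| async move {
            // Insert the collection itself (here, an organization).
            let org = Silo::insert_resource(
                silo_id,
                diesel::insert_into(dsl::organization).values(organization),
            )
            .insert_and_get_result_async(&conn)
            .await
            // Simplified; the real code distinguishes a missing parent
            // silo from a name conflict.
            .map_err(|_| Error::internal_error("organization insert failed"))?;

            // Insert the usage record on the *same* connection, so it
            // cannot outlive (or predate) the collection it describes.
            self.resource_usage_create_on_connection(
                &conn,
                ResourceUsage::new(org.id(), CollectionType::Organization),
            )
            .await?;
            Ok(org)
        })
        .await
        .map_err(|e| match e {
            // Errors raised inside the closure pass through unchanged;
            // connection-level failures become generic server errors.
            TransactionError::CustomError(e) => e,
            TransactionError::Pool(e) => {
                public_error_from_diesel_pool(e, ErrorHandler::Server)
            }
        })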
- self.resource_usage_create( - opctx, - ResourceUsage::new(org.id(), CollectionType::Organization), - ) - .await?; + .insert_and_get_result_async(&conn) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => { + Error::InternalError { + internal_message: format!( + "attempting to create an \ + organization under non-existent silo {}", + silo_id + ), + } + } + AsyncInsertError::DatabaseError(e) => { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict( + ResourceType::Organization, + &name, + ), + ) + } + })?; + + self.resource_usage_create_on_connection( + &conn, + ResourceUsage::new(org.id(), CollectionType::Organization), + ) + .await?; - Ok(org) + Ok(org) + }) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => e, + TransactionError::Pool(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } + }) } /// Delete a organization diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index 0c89f423e4f..8056a149239 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -12,6 +12,7 @@ use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; +use crate::db::error::TransactionError; use crate::db::identity::Resource; use crate::db::model::CollectionType; use crate::db::model::Name; @@ -20,7 +21,7 @@ use crate::db::model::Project; use crate::db::model::ProjectUpdate; use crate::db::model::ResourceUsage; use crate::db::pagination::paginated; -use async_bb8_diesel::AsyncRunQueryDsl; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::CreateResult; @@ -47,37 +48,49 @@ impl DataStore { let name = project.name().as_str().to_string(); let organization_id = project.organization_id; - let project = Organization::insert_resource( - organization_id, - diesel::insert_into(dsl::project).values(project), - ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { - type_name: ResourceType::Organization, - lookup_type: LookupType::ById(organization_id), - }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Project, &name), + + self.pool_authorized(opctx) + .await? + .transaction_async(|conn| async move { + let project = Organization::insert_resource( + organization_id, + diesel::insert_into(dsl::project).values(project), + ) + .insert_and_get_result_async(&conn) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => { + Error::ObjectNotFound { + type_name: ResourceType::Organization, + lookup_type: LookupType::ById(organization_id), + } + } + AsyncInsertError::DatabaseError(e) => { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict( + ResourceType::Project, + &name, + ), + ) + } + })?; + + // Create resource usage for the project. + self.resource_usage_create_on_connection( + &conn, + ResourceUsage::new(project.id(), CollectionType::Project), ) - } - })?; - - // Create resource usage for the project. - // - // NOTE: if you do this before the project is created, it'll exist as - // soon as the project does. However, that'll work better in a saga/CTE when - // unwinding is built-in more naturally. 
- self.resource_usage_create( - opctx, - ResourceUsage::new(project.id(), CollectionType::Project), - ) - .await?; - - Ok(project) + .await?; + Ok(project) + }) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => e, + TransactionError::Pool(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } + }) } /// Delete a project From 95bdaff87bab515331dce675a965d85f04126474 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 22:56:39 -0400 Subject: [PATCH 29/80] updated comment --- nexus/src/db/datastore/resource_usage.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index 2fe13d67054..2bf7af7e98d 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -20,8 +20,9 @@ use oximeter::{types::Sample, Metric, MetricsError, Target}; use std::sync::{Arc, Mutex}; use uuid::Uuid; -/// A collection which holds resources (such as a project, organization, or -/// silo). +/// Describes a collection that holds other resources. +/// +/// Example targets might include projects, organizations, silos or fleets. #[derive(Debug, Clone, Target)] struct CollectionTarget { id: Uuid, From f05296e79298bb205cdd440a0730b104eed4b032 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 23:11:45 -0400 Subject: [PATCH 30/80] Add 404 handling for resource usage --- common/src/api/external/mod.rs | 1 + nexus/src/db/datastore/resource_usage.rs | 13 ++++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 63ccfca1771..f93dd86abf4 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -559,6 +559,7 @@ pub enum ResourceType { RouterRoute, Oximeter, MetricProducer, + ResourceUsage, RoleBuiltin, UpdateAvailableArtifact, UserBuiltin, diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index 2bf7af7e98d..b8c7d081070 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -14,8 +14,9 @@ use crate::db::pool::DbConnection; use crate::db::queries::resource_usage_update::ResourceUsageUpdate; use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use diesel::prelude::*; -use omicron_common::api::external::DeleteResult; -use omicron_common::api::external::Error; +use omicron_common::api::external::{ + DeleteResult, Error, LookupType, ResourceType, +}; use oximeter::{types::Sample, Metric, MetricsError, Target}; use std::sync::{Arc, Mutex}; use uuid::Uuid; @@ -161,7 +162,13 @@ impl DataStore { .get_result_async(self.pool_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::ResourceUsage, + LookupType::ById(id), + ), + ) })?; Ok(resource_usage) } From 9a758c2b0612ba692664ca66e4141beed32af4d0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 23:31:32 -0400 Subject: [PATCH 31/80] comments --- nexus/src/db/datastore/resource_usage.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/resource_usage.rs index b8c7d081070..978a6723b01 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/resource_usage.rs @@ -47,8 +47,14 @@ struct RamProvisioned { bytes: i64, } +/// An oximeter producer for reporting [`ResourceUsage`] information to Clickhouse. +/// +/// This producer collects samples whenever the database record for a collection +/// is created or updated. This implies that the CockroachDB record is always +/// kept up-to-date, and the Clickhouse historical records are batched and +/// transmitted once they are collected (as is the norm for Clickhouse metrics). #[derive(Debug, Default, Clone)] -pub struct Producer { +pub(crate) struct Producer { samples: Arc>>, } @@ -110,7 +116,7 @@ impl oximeter::Producer for Producer { } impl DataStore { - /// Create a resource_usage + /// Create a [`ResourceUsage`] object. pub async fn resource_usage_create( &self, opctx: &OpContext, @@ -173,7 +179,7 @@ impl DataStore { Ok(resource_usage) } - /// Delete a resource_usage + /// Delete a [`ResourceUsage`] object. pub async fn resource_usage_delete( &self, opctx: &OpContext, From 8d153f6277e82436363181377266760c72356e50 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 7 Oct 2022 23:37:37 -0400 Subject: [PATCH 32/80] http entrypoints naming --- nexus/src/external_api/http_entrypoints.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index e400aafa7f0..2cabe3b25a9 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -241,7 +241,7 @@ pub fn external_api() -> NexusApiDescription { api.register(system_image_view_by_id)?; api.register(system_image_delete)?; - api.register(system_metrics_list)?; + api.register(system_metric)?; api.register(updates_refresh)?; api.register(user_list)?; @@ -4027,7 +4027,7 @@ async fn updates_refresh( // Metrics #[derive(Debug, Deserialize, JsonSchema)] -pub struct ResourceUtilization { +pub struct SystemMetricParams { #[serde(flatten)] pub pagination: dropshot::PaginationParams< params::ResourceMetrics, @@ -4059,10 +4059,10 @@ struct SystemMetricsPathParam { path = "/system/metrics/{resource_name}", tags = ["system"], }] -async fn system_metrics_list( +async fn system_metric( rqctx: Arc>>, path_params: Path, - query_params: Query, + query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; From 91a1b9dc7c5e4fbf047b4c548f2c19f5f9f8f4b4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sat, 8 Oct 2022 00:05:59 -0400 Subject: [PATCH 33/80] More endpoint updates --- nexus/src/external_api/http_entrypoints.rs | 16 ++++++++-------- nexus/tests/output/nexus_tags.txt | 2 +- openapi/nexus.json | 10 +++++----- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nexus/src/external_api/http_entrypoints.rs 
b/nexus/src/external_api/http_entrypoints.rs index 2cabe3b25a9..321f85ef1f2 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -4042,7 +4042,7 @@ pub struct SystemMetricParams { #[derive(Display, Deserialize, JsonSchema)] #[display(style = "snake_case")] #[serde(rename_all = "snake_case")] -pub enum ResourceName { +pub enum SystemMetricName { VirtualDiskSpaceProvisioned, CpusProvisioned, RamProvisioned, @@ -4050,13 +4050,13 @@ pub enum ResourceName { #[derive(Deserialize, JsonSchema)] struct SystemMetricsPathParam { - resource_name: ResourceName, + metric_name: SystemMetricName, } /// Access metrics data #[endpoint { method = GET, - path = "/system/metrics/{resource_name}", + path = "/system/metrics/{metric_name}", tags = ["system"], }] async fn system_metric( @@ -4066,7 +4066,7 @@ async fn system_metric( ) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.nexus; - let resource_name = path_params.into_inner().resource_name; + let metric_name = path_params.into_inner().metric_name; let query = query_params.into_inner(); let limit = rqctx.page_limit(&query.pagination)?; @@ -4074,8 +4074,8 @@ async fn system_metric( let handler = async { let opctx = OpContext::for_external_api(&rqctx).await?; - let result = match resource_name { - ResourceName::VirtualDiskSpaceProvisioned => { + let result = match metric_name { + SystemMetricName::VirtualDiskSpaceProvisioned => { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; nexus .select_timeseries( @@ -4086,7 +4086,7 @@ async fn system_metric( ) .await? } - ResourceName::CpusProvisioned => { + SystemMetricName::CpusProvisioned => { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; nexus .select_timeseries( @@ -4097,7 +4097,7 @@ async fn system_metric( ) .await? 
} - ResourceName::RamProvisioned => { + SystemMetricName::RamProvisioned => { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; nexus .select_timeseries( diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 0ebcbe3baab..a65080a63af 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -145,7 +145,7 @@ system_image_delete /system/images/{image_name} system_image_list /system/images system_image_view /system/images/{image_name} system_image_view_by_id /system/by-id/images/{id} -system_metrics_list /system/metrics/{resource_name} +system_metric /system/metrics/{metric_name} system_user_list /system/user system_user_view /system/user/{user_name} updates_refresh /system/updates/refresh diff --git a/openapi/nexus.json b/openapi/nexus.json index 2839fb44d6a..42d93b277a3 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -6357,20 +6357,20 @@ } } }, - "/system/metrics/{resource_name}": { + "/system/metrics/{metric_name}": { "get": { "tags": [ "system" ], "summary": "Access metrics data", - "operationId": "system_metrics_list", + "operationId": "system_metric", "parameters": [ { "in": "path", - "name": "resource_name", + "name": "metric_name", "required": true, "schema": { - "$ref": "#/components/schemas/ResourceName" + "$ref": "#/components/schemas/SystemMetricName" }, "style": "simple" }, @@ -12287,7 +12287,7 @@ } ] }, - "ResourceName": { + "SystemMetricName": { "type": "string", "enum": [ "virtual_disk_space_provisioned", From c69363082a87e125f0fa743c1ecfb3949753ad10 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Sun, 9 Oct 2022 09:11:40 -0700 Subject: [PATCH 34/80] rename 'ResourceUsage' to 'VirtualResourceProvisioning' --- common/src/api/external/mod.rs | 2 +- common/src/sql/dbinit.sql | 6 +- nexus/db-model/src/lib.rs | 4 +- nexus/db-model/src/queries/mod.rs | 2 +- ...> virtual_resource_provisioning_update.rs} | 4 +- nexus/db-model/src/schema.rs | 4 +- ...ge.rs => virtual_resource_provisioning.rs} | 16 +-- nexus/src/app/disk.rs | 2 +- nexus/src/app/instance.rs | 2 +- nexus/src/app/sagas/disk_create.rs | 4 +- nexus/src/app/sagas/disk_delete.rs | 4 +- nexus/src/app/sagas/instance_create.rs | 4 +- nexus/src/app/sagas/snapshot_create.rs | 4 +- nexus/src/db/datastore/mod.rs | 12 +- nexus/src/db/datastore/organization.rs | 9 +- nexus/src/db/datastore/project.rs | 9 +- nexus/src/db/datastore/silo.rs | 25 ++-- ...ge.rs => virtual_resource_provisioning.rs} | 111 ++++++++------- nexus/src/db/queries/mod.rs | 2 +- ...> virtual_resource_provisioning_update.rs} | 30 ++-- nexus/src/populate.rs | 4 +- nexus/tests/integration_tests/disks.rs | 132 +++++++++++------- nexus/tests/integration_tests/instances.rs | 40 +++--- nexus/tests/integration_tests/snapshots.rs | 30 +++- 24 files changed, 276 insertions(+), 186 deletions(-) rename nexus/db-model/src/queries/{resource_usage_update.rs => virtual_resource_provisioning_update.rs} (90%) rename nexus/db-model/src/{resource_usage.rs => virtual_resource_provisioning.rs} (72%) rename nexus/src/db/datastore/{resource_usage.rs => virtual_resource_provisioning.rs} (63%) rename nexus/src/db/queries/{resource_usage_update.rs => virtual_resource_provisioning_update.rs} (87%) diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index f93dd86abf4..f02d1acfdef 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -559,10 +559,10 @@ pub enum ResourceType { RouterRoute, Oximeter, MetricProducer, - ResourceUsage, RoleBuiltin, 
UpdateAvailableArtifact, UserBuiltin, + VirtualResourceProvisioning, Zpool, } diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 679f8ce46b8..a5e811aa759 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -139,17 +139,17 @@ CREATE INDEX ON omicron.public.service ( sled_id ); --- A table describing resource usage which may be associated +-- A table describing virtual resource provisioning which may be associated -- with a collection of objects, including: -- - Projects -- - Organizations -- - Silos -- - Fleet -CREATE TABLE omicron.public.resource_usage ( +CREATE TABLE omicron.public.virtual_resource_provisioning ( -- Should match the UUID of the corresponding collection. id UUID PRIMARY KEY, -- Identifies the type of the collection. - resource_type STRING(63) NOT NULL, + collection_type STRING(63) NOT NULL, -- The amount of physical disk space which has been provisioned -- on behalf of the collection. diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 1727706ca0e..658cee221eb 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -47,7 +47,6 @@ pub mod queries; mod rack; mod region; mod region_snapshot; -mod resource_usage; mod role_assignment; mod role_builtin; pub mod saga_types; @@ -63,6 +62,7 @@ mod ssh_key; mod u16; mod update_artifact; mod user_builtin; +mod virtual_resource_provisioning; mod vni; mod volume; mod vpc; @@ -115,7 +115,6 @@ pub use project::*; pub use rack::*; pub use region::*; pub use region_snapshot::*; -pub use resource_usage::*; pub use role_assignment::*; pub use role_builtin::*; pub use service::*; @@ -128,6 +127,7 @@ pub use snapshot::*; pub use ssh_key::*; pub use update_artifact::*; pub use user_builtin::*; +pub use virtual_resource_provisioning::*; pub use vni::*; pub use volume::*; pub use vpc::*; diff --git a/nexus/db-model/src/queries/mod.rs b/nexus/db-model/src/queries/mod.rs index b3e867009d8..b9fa7c46086 100644 --- a/nexus/db-model/src/queries/mod.rs +++ b/nexus/db-model/src/queries/mod.rs @@ -5,4 +5,4 @@ //! Subqueries used in CTEs. pub mod region_allocation; -pub mod resource_usage_update; +pub mod virtual_resource_provisioning_update; diff --git a/nexus/db-model/src/queries/resource_usage_update.rs b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs similarity index 90% rename from nexus/db-model/src/queries/resource_usage_update.rs rename to nexus/db-model/src/queries/virtual_resource_provisioning_update.rs index f221ddd117b..18fcf086e80 100644 --- a/nexus/db-model/src/queries/resource_usage_update.rs +++ b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs @@ -5,8 +5,8 @@ //! Describes the resource usage update CTE use crate::schema::organization; -use crate::schema::resource_usage; use crate::schema::silo; +use crate::schema::virtual_resource_provisioning; table! { parent_org { @@ -36,7 +36,7 @@ diesel::allow_tables_to_appear_in_same_query!(organization, parent_org,); diesel::allow_tables_to_appear_in_same_query!(silo, parent_silo,); diesel::allow_tables_to_appear_in_same_query!( - resource_usage, + virtual_resource_provisioning, parent_org, parent_silo, parent_fleet, diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 033cff9ed1e..fe56bc10132 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -408,9 +408,9 @@ table! { } table! 
{ - resource_usage { + virtual_resource_provisioning { id -> Uuid, - resource_type -> Text, + collection_type -> Text, virtual_disk_bytes_provisioned -> Int8, cpus_provisioned -> Int8, ram_provisioned -> Int8, diff --git a/nexus/db-model/src/resource_usage.rs b/nexus/db-model/src/virtual_resource_provisioning.rs similarity index 72% rename from nexus/db-model/src/resource_usage.rs rename to nexus/db-model/src/virtual_resource_provisioning.rs index b07ba7a5c36..55591c6fc30 100644 --- a/nexus/db-model/src/resource_usage.rs +++ b/nexus/db-model/src/virtual_resource_provisioning.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use crate::schema::resource_usage; +use crate::schema::virtual_resource_provisioning; use uuid::Uuid; #[derive(Debug)] @@ -24,23 +24,23 @@ impl std::fmt::Display for CollectionType { } } -/// Describes resource_usage for a collection +/// Describes virtual_resource_provisioning for a collection #[derive(Selectable, Queryable, Insertable, Debug)] -#[diesel(table_name = resource_usage)] -pub struct ResourceUsage { +#[diesel(table_name = virtual_resource_provisioning)] +pub struct VirtualResourceProvisioning { pub id: Uuid, - pub resource_type: String, + pub collection_type: String, pub virtual_disk_bytes_provisioned: i64, pub cpus_provisioned: i64, pub ram_provisioned: i64, } -impl ResourceUsage { - pub fn new(id: Uuid, resource_type: CollectionType) -> Self { +impl VirtualResourceProvisioning { + pub fn new(id: Uuid, collection_type: CollectionType) -> Self { Self { id, - resource_type: resource_type.to_string(), + collection_type: collection_type.to_string(), virtual_disk_bytes_provisioned: 0, cpus_provisioned: 0, ram_provisioned: 0, diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 7089cb0102f..4dd0b439252 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -513,7 +513,7 @@ impl super::Nexus { // TODO: This should exist within a saga self.db_datastore - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, project.id(), -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 4c65606f687..358a44e1aec 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -251,7 +251,7 @@ impl super::Nexus { .project_delete_instance(opctx, &authz_instance) .await?; self.db_datastore - .resource_usage_update_cpus_and_ram( + .virtual_resource_provisioning_update_cpus_and_ram( &opctx, project.id(), -i64::from(instance.runtime_state.ncpus.0 .0), diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 994a23bf351..2fe691e3706 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -285,7 +285,7 @@ async fn sdc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, params.project_id, i64::try_from(disk_created.size.to_bytes()) @@ -312,7 +312,7 @@ async fn sdc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, params.project_id, -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 
9bf5c4be8ae..dcb08804a36 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -114,7 +114,7 @@ async fn sdd_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, params.project_id, -i64::try_from(deleted_disk.size.to_bytes()) @@ -141,7 +141,7 @@ async fn sdd_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, params.project_id, i64::try_from(deleted_disk.size.to_bytes()) diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index cf8a22fda3d..10aa54ba9a2 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -862,7 +862,7 @@ async fn sic_account_resources( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_cpus_and_ram( + .virtual_resource_provisioning_update_cpus_and_ram( &opctx, params.project_id, i64::from(params.create_params.ncpus.0), @@ -889,7 +889,7 @@ async fn sic_account_resources_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_cpus_and_ram( + .virtual_resource_provisioning_update_cpus_and_ram( &opctx, params.project_id, -i64::from(params.create_params.ncpus.0), diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index ad4c7c67c0b..4757a0f00f5 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -552,7 +552,7 @@ async fn ssc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, params.project_id, i64::try_from(snapshot_created.size.to_bytes()) @@ -580,7 +580,7 @@ async fn ssc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .resource_usage_update_disk( + .virtual_resource_provisioning_update_disk( &opctx, params.project_id, -i64::try_from(snapshot_created.size.to_bytes()) diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index be080865335..17d02255ce6 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -58,7 +58,6 @@ mod project; mod rack; mod region; mod region_snapshot; -mod resource_usage; mod role; mod saga; mod service; @@ -69,6 +68,7 @@ mod sled; mod snapshot; mod ssh_key; mod update; +mod virtual_resource_provisioning; mod volume; mod vpc; mod zpool; @@ -106,7 +106,8 @@ impl RunnableQuery for T where pub struct DataStore { pool: Arc, - resource_usage_producer: resource_usage::Producer, + virtual_resource_provisioning_producer: + virtual_resource_provisioning::Producer, } // The majority of `DataStore`'s methods live in our submodules as a concession @@ -116,13 +117,16 @@ impl DataStore { pub fn new(pool: Arc) -> Self { DataStore { pool, - resource_usage_producer: resource_usage::Producer::new(), + virtual_resource_provisioning_producer: + virtual_resource_provisioning::Producer::new(), } } pub fn register_producers(&self, registry: &ProducerRegistry) { registry - .register_producer(self.resource_usage_producer.clone()) + .register_producer( + 
self.virtual_resource_provisioning_producer.clone(), + ) .unwrap(); } diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index 97a9349d47e..00395a79d48 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -19,8 +19,8 @@ use crate::db::model::CollectionType; use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::OrganizationUpdate; -use crate::db::model::ResourceUsage; use crate::db::model::Silo; +use crate::db::model::VirtualResourceProvisioning; use crate::db::pagination::paginated; use crate::external_api::params; use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; @@ -84,9 +84,12 @@ impl DataStore { } })?; - self.resource_usage_create_on_connection( + self.virtual_resource_provisioning_create_on_connection( &conn, - ResourceUsage::new(org.id(), CollectionType::Organization), + VirtualResourceProvisioning::new( + org.id(), + CollectionType::Organization, + ), ) .await?; diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index 8056a149239..bbac606d176 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -19,7 +19,7 @@ use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::Project; use crate::db::model::ProjectUpdate; -use crate::db::model::ResourceUsage; +use crate::db::model::VirtualResourceProvisioning; use crate::db::pagination::paginated; use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; @@ -77,9 +77,12 @@ impl DataStore { })?; // Create resource usage for the project. - self.resource_usage_create_on_connection( + self.virtual_resource_provisioning_create_on_connection( &conn, - ResourceUsage::new(project.id(), CollectionType::Project), + VirtualResourceProvisioning::new( + project.id(), + CollectionType::Project, + ), ) .await?; Ok(project) diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 673af67036b..bde99cbca6a 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -17,8 +17,8 @@ use crate::db::fixed_data::silo::DEFAULT_SILO; use crate::db::identity::Resource; use crate::db::model::CollectionType; use crate::db::model::Name; -use crate::db::model::ResourceUsage; use crate::db::model::Silo; +use crate::db::model::VirtualResourceProvisioning; use crate::db::pagination::paginated; use crate::external_api::params; use crate::external_api::shared; @@ -56,9 +56,12 @@ impl DataStore { })?; info!(opctx.log, "created {} built-in silos", count); - self.resource_usage_create( + self.virtual_resource_provisioning_create( opctx, - ResourceUsage::new(DEFAULT_SILO.id(), CollectionType::Silo), + VirtualResourceProvisioning::new( + DEFAULT_SILO.id(), + CollectionType::Silo, + ), ) .await?; @@ -147,15 +150,21 @@ impl DataStore { .await? 
.transaction_async(|conn| async move { let silo = silo_create_query.get_result_async(&conn).await?; - use db::schema::resource_usage::dsl; - diesel::insert_into(dsl::resource_usage) - .values(ResourceUsage::new(silo.id(), CollectionType::Silo)) + use db::schema::virtual_resource_provisioning::dsl; + diesel::insert_into(dsl::virtual_resource_provisioning) + .values(VirtualResourceProvisioning::new( + silo.id(), + CollectionType::Silo, + )) .execute_async(&conn) .await?; - self.resource_usage_create_on_connection( + self.virtual_resource_provisioning_create_on_connection( &conn, - ResourceUsage::new(DEFAULT_SILO.id(), CollectionType::Silo), + VirtualResourceProvisioning::new( + DEFAULT_SILO.id(), + CollectionType::Silo, + ), ) .await?; diff --git a/nexus/src/db/datastore/resource_usage.rs b/nexus/src/db/datastore/virtual_resource_provisioning.rs similarity index 63% rename from nexus/src/db/datastore/resource_usage.rs rename to nexus/src/db/datastore/virtual_resource_provisioning.rs index 978a6723b01..a7c4bf63721 100644 --- a/nexus/src/db/datastore/resource_usage.rs +++ b/nexus/src/db/datastore/virtual_resource_provisioning.rs @@ -2,16 +2,16 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! [`DataStore`] methods on [`ResourceUsage`]s. +//! [`DataStore`] methods on [`VirtualResourceProvisioning`]s. use super::DataStore; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; -use crate::db::model::ResourceUsage; +use crate::db::model::VirtualResourceProvisioning; use crate::db::pool::DbConnection; -use crate::db::queries::resource_usage_update::ResourceUsageUpdate; +use crate::db::queries::virtual_resource_provisioning_update::VirtualResourceProvisioningUpdate; use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use diesel::prelude::*; use omicron_common::api::external::{ @@ -47,7 +47,7 @@ struct RamProvisioned { bytes: i64, } -/// An oximeter producer for reporting [`ResourceUsage`] information to Clickhouse. +/// An oximeter producer for reporting [`VirtualResourceProvisioning`] information to Clickhouse. /// /// This producer collects samples whenever the database record for a collection /// is created or updated. This implies that the CockroachDB record is always @@ -63,7 +63,7 @@ impl Producer { Self { samples: Arc::new(Mutex::new(vec![])) } } - fn append_disk_metrics(&self, usages: &Vec) { + fn append_disk_metrics(&self, usages: &Vec) { let new_samples = usages .iter() .map(|usage| { @@ -79,7 +79,7 @@ impl Producer { self.append(new_samples); } - fn append_cpu_metrics(&self, usages: &Vec) { + fn append_cpu_metrics(&self, usages: &Vec) { let new_samples = usages .iter() .map(|usage| { @@ -116,31 +116,37 @@ impl oximeter::Producer for Producer { } impl DataStore { - /// Create a [`ResourceUsage`] object. - pub async fn resource_usage_create( + /// Create a [`VirtualResourceProvisioning`] object. 
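(Aside, not part of the patches: two entry points are defined below on purpose. The public method takes the authorized pool, while the `_on_connection` variant is generic over the connection's error type so the same insert runs both standalone and inside `transaction_async`, as in the organization and project datastores above. A sketch of the standalone call, assuming the omicron datastore context; note it returns the records actually inserted:)

    let usages = datastore
        .virtual_resource_provisioning_create(
            opctx,
            VirtualResourceProvisioning::new(id, CollectionType::Project),
        )
        .await?;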
+ pub async fn virtual_resource_provisioning_create( &self, opctx: &OpContext, - resource_usage: ResourceUsage, - ) -> Result, Error> { + virtual_resource_provisioning: VirtualResourceProvisioning, + ) -> Result, Error> { let pool = self.pool_authorized(opctx).await?; - self.resource_usage_create_on_connection(pool, resource_usage).await + self.virtual_resource_provisioning_create_on_connection( + pool, + virtual_resource_provisioning, + ) + .await } - pub(crate) async fn resource_usage_create_on_connection( + pub(crate) async fn virtual_resource_provisioning_create_on_connection< + ConnErr, + >( &self, conn: &(impl async_bb8_diesel::AsyncConnection + Sync), - resource_usage: ResourceUsage, - ) -> Result, Error> + virtual_resource_provisioning: VirtualResourceProvisioning, + ) -> Result, Error> where ConnErr: From + Send + 'static, PoolError: From, { - use db::schema::resource_usage::dsl; + use db::schema::virtual_resource_provisioning::dsl; - let usages: Vec = - diesel::insert_into(dsl::resource_usage) - .values(resource_usage) + let usages: Vec = + diesel::insert_into(dsl::virtual_resource_provisioning) + .values(virtual_resource_provisioning) .on_conflict_do_nothing() .get_results_async(conn) .await @@ -150,44 +156,45 @@ impl DataStore { ErrorHandler::Server, ) })?; - self.resource_usage_producer.append_disk_metrics(&usages); - self.resource_usage_producer.append_cpu_metrics(&usages); + self.virtual_resource_provisioning_producer + .append_disk_metrics(&usages); + self.virtual_resource_provisioning_producer.append_cpu_metrics(&usages); Ok(usages) } - pub async fn resource_usage_get( + pub async fn virtual_resource_provisioning_get( &self, opctx: &OpContext, id: Uuid, - ) -> Result { - use db::schema::resource_usage::dsl; + ) -> Result { + use db::schema::virtual_resource_provisioning::dsl; - let resource_usage = dsl::resource_usage + let virtual_resource_provisioning = dsl::virtual_resource_provisioning .find(id) - .select(ResourceUsage::as_select()) + .select(VirtualResourceProvisioning::as_select()) .get_result_async(self.pool_authorized(opctx).await?) .await .map_err(|e| { public_error_from_diesel_pool( e, ErrorHandler::NotFoundByLookup( - ResourceType::ResourceUsage, + ResourceType::VirtualResourceProvisioning, LookupType::ById(id), ), ) })?; - Ok(resource_usage) + Ok(virtual_resource_provisioning) } - /// Delete a [`ResourceUsage`] object. - pub async fn resource_usage_delete( + /// Delete a [`VirtualResourceProvisioning`] object. + pub async fn virtual_resource_provisioning_delete( &self, opctx: &OpContext, id: Uuid, ) -> DeleteResult { - use db::schema::resource_usage::dsl; + use db::schema::virtual_resource_provisioning::dsl; - diesel::delete(dsl::resource_usage) + diesel::delete(dsl::virtual_resource_provisioning) .filter(dsl::id.eq(id)) .execute_async(self.pool_authorized(opctx).await?) .await @@ -198,38 +205,42 @@ impl DataStore { } /// Transitively updates all provisioned disk usage from project -> fleet. - pub async fn resource_usage_update_disk( + pub async fn virtual_resource_provisioning_update_disk( &self, opctx: &OpContext, project_id: Uuid, disk_byte_diff: i64, - ) -> Result, Error> { - let usages = - ResourceUsageUpdate::new_update_disk(project_id, disk_byte_diff) - .get_results_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - self.resource_usage_producer.append_disk_metrics(&usages); + ) -> Result, Error> { + let usages = VirtualResourceProvisioningUpdate::new_update_disk( + project_id, + disk_byte_diff, + ) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + self.virtual_resource_provisioning_producer + .append_disk_metrics(&usages); Ok(usages) } /// Transitively updates all CPU/RAM usage from project -> fleet. - pub async fn resource_usage_update_cpus_and_ram( + pub async fn virtual_resource_provisioning_update_cpus_and_ram( &self, opctx: &OpContext, project_id: Uuid, cpus_diff: i64, ram_diff: i64, - ) -> Result, Error> { - let usages = ResourceUsageUpdate::new_update_cpus_and_ram( - project_id, cpus_diff, ram_diff, - ) - .get_results_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; - self.resource_usage_producer.append_cpu_metrics(&usages); + ) -> Result, Error> { + let usages = + VirtualResourceProvisioningUpdate::new_update_cpus_and_ram( + project_id, cpus_diff, ram_diff, + ) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool(e, ErrorHandler::Server) + })?; + self.virtual_resource_provisioning_producer.append_cpu_metrics(&usages); Ok(usages) } } diff --git a/nexus/src/db/queries/mod.rs b/nexus/src/db/queries/mod.rs index c6a8e60efb3..dcda0e3d4d9 100644 --- a/nexus/src/db/queries/mod.rs +++ b/nexus/src/db/queries/mod.rs @@ -11,6 +11,6 @@ pub mod ip_pool; mod next_item; pub mod network_interface; pub mod region_allocation; -pub mod resource_usage_update; +pub mod virtual_resource_provisioning_update; pub mod vpc; pub mod vpc_subnet; diff --git a/nexus/src/db/queries/resource_usage_update.rs b/nexus/src/db/queries/virtual_resource_provisioning_update.rs similarity index 87% rename from nexus/src/db/queries/resource_usage_update.rs rename to nexus/src/db/queries/virtual_resource_provisioning_update.rs index c5d82a1a7fa..740cd1e935a 100644 --- a/nexus/src/db/queries/resource_usage_update.rs +++ b/nexus/src/db/queries/virtual_resource_provisioning_update.rs @@ -5,9 +5,9 @@ //! Implementation of queries for updating resource usage info. use crate::db::alias::ExpressionAlias; -use crate::db::model::ResourceUsage; +use crate::db::model::VirtualResourceProvisioning; use crate::db::pool::DbConnection; -use crate::db::schema::resource_usage; +use crate::db::schema::virtual_resource_provisioning; use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery}; use db_macros::Subquery; use diesel::pg::Pg; @@ -16,7 +16,7 @@ use diesel::{ sql_types, CombineDsl, ExpressionMethods, IntoSql, QueryDsl, RunQueryDsl, SelectableHelper, }; -use nexus_db_model::queries::resource_usage_update::{ +use nexus_db_model::queries::virtual_resource_provisioning_update::{ all_collections, parent_fleet, parent_org, parent_silo, }; @@ -130,11 +130,11 @@ impl AllCollections { /// Constructs a CTE for updating resource usage information in all /// collections for a particular object. #[derive(QueryId)] -pub struct ResourceUsageUpdate { +pub struct VirtualResourceProvisioningUpdate { cte: Cte, } -impl ResourceUsageUpdate { +impl VirtualResourceProvisioningUpdate { // Generic utility for updating all collections including this resource, // even transitively. 
// @@ -145,7 +145,7 @@ impl ResourceUsageUpdate { // - Fleet fn apply_update(project_id: uuid::Uuid, values: V) -> Self where - V: diesel::AsChangeset, + V: diesel::AsChangeset, ::Changeset: QueryFragment + Send + 'static, { @@ -159,15 +159,15 @@ impl ResourceUsageUpdate { &parent_fleet, ); - use resource_usage::dsl; + use virtual_resource_provisioning::dsl; let final_update = Box::new( - diesel::update(dsl::resource_usage) + diesel::update(dsl::virtual_resource_provisioning) .set(values) .filter(dsl::id.eq_any( all_collections.query_source().select(all_collections::id), )) - .returning(ResourceUsage::as_returning()), + .returning(VirtualResourceProvisioning::as_returning()), ); let cte = CteBuilder::new() @@ -184,7 +184,7 @@ impl ResourceUsageUpdate { project_id: uuid::Uuid, disk_bytes_diff: i64, ) -> Self { - use resource_usage::dsl; + use virtual_resource_provisioning::dsl; Self::apply_update( project_id, dsl::virtual_disk_bytes_provisioned @@ -197,7 +197,7 @@ impl ResourceUsageUpdate { cpus_diff: i64, ram_diff: i64, ) -> Self { - use resource_usage::dsl; + use virtual_resource_provisioning::dsl; Self::apply_update( project_id, ( @@ -208,7 +208,7 @@ impl ResourceUsageUpdate { } } -impl QueryFragment for ResourceUsageUpdate { +impl QueryFragment for VirtualResourceProvisioningUpdate { fn walk_ast<'a>( &'a self, mut out: AstPass<'_, 'a, Pg>, @@ -224,8 +224,8 @@ type SelectableSql = < >::SelectExpression as diesel::Expression >::SqlType; -impl Query for ResourceUsageUpdate { - type SqlType = SelectableSql; +impl Query for VirtualResourceProvisioningUpdate { + type SqlType = SelectableSql; } -impl RunQueryDsl for ResourceUsageUpdate {} +impl RunQueryDsl for VirtualResourceProvisioningUpdate {} diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 3723d02f7d3..22a60b0fd66 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -282,9 +282,9 @@ impl Populator for PopulateFleet { let id = *db::fixed_data::FLEET_ID; datastore.fleet_insert(opctx, &db::model::Fleet::new(id)).await?; datastore - .resource_usage_create( + .virtual_resource_provisioning_create( opctx, - db::model::ResourceUsage::new( + db::model::VirtualResourceProvisioning::new( id, db::model::CollectionType::Fleet, ), diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 92d67515873..0aa84841eb6 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -900,7 +900,9 @@ async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { +async fn test_disk_virtual_resource_provisioning( + cptestctx: &ControlPlaneTestContext, +) { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; let datastore = nexus.datastore(); @@ -918,21 +920,31 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); // The project and organization should start as empty. 
- let resource_usage = - datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); - let resource_usage = - datastore.resource_usage_get(&opctx, org_id).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); - let resource_usage = - datastore.resource_usage_get(&opctx, *SILO_ID).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); - let resource_usage = - datastore.resource_usage_get(&opctx, *FLEET_ID).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id1) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id2) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, org_id) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, *SILO_ID) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, *FLEET_ID) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); // Ask for a 1 gibibyte disk in the first project. 
// @@ -960,31 +972,41 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { .execute() .await .expect("unexpected failure creating 1 GiB disk"); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id1) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); - let resource_usage = - datastore.resource_usage_get(&opctx, org_id).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id2) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, org_id) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let resource_usage = - datastore.resource_usage_get(&opctx, *SILO_ID).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, *SILO_ID) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let resource_usage = - datastore.resource_usage_get(&opctx, *FLEET_ID).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, *FLEET_ID) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); @@ -1015,22 +1037,28 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { .execute() .await .expect("unexpected failure creating 1 GiB disk"); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id1) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id2) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let resource_usage = - datastore.resource_usage_get(&opctx, org_id).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, org_id) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, 2 * disk_size.to_bytes() as i64 ); @@ -1042,19 +1070,25 @@ async fn test_disk_resource_usage(cptestctx: &ControlPlaneTestContext) { .execute() .await .expect("failed to delete disk"); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id1).await.unwrap(); + let 
virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id1) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id2).await.unwrap(); - assert_eq!(resource_usage.virtual_disk_bytes_provisioned, 0); - let resource_usage = - datastore.resource_usage_get(&opctx, org_id).await.unwrap(); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id2) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, org_id) + .await + .unwrap(); assert_eq!( - resource_usage.virtual_disk_bytes_provisioned, + virtual_resource_provisioning.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); } diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 4522ac5ad9d..2e6e102380f 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -469,20 +469,24 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(resource_usage.cpus_provisioned, 0); - assert_eq!(resource_usage.ram_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.cpus_provisioned, 0); + assert_eq!(virtual_resource_provisioning.ram_provisioned, 0); // Create an instance. let instance_url = format!("{}/just-rainsticks", url_instances); create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, "just-rainsticks") .await; - let resource_usage = - datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(resource_usage.cpus_provisioned, 4); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.cpus_provisioned, 4); assert_eq!( - resource_usage.ram_provisioned, + virtual_resource_provisioning.ram_provisioned, i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(), ); @@ -499,11 +503,13 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { // However, for implementation reasons, this is complicated (we have a // tendency to update the runtime without checking the prior state, which // makes edge-triggered behavior trickier to notice). 
- let resource_usage = - datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(resource_usage.cpus_provisioned, 4); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.cpus_provisioned, 4); assert_eq!( - resource_usage.ram_provisioned, + virtual_resource_provisioning.ram_provisioned, i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(), ); @@ -514,10 +520,12 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let resource_usage = - datastore.resource_usage_get(&opctx, project_id).await.unwrap(); - assert_eq!(resource_usage.cpus_provisioned, 0); - assert_eq!(resource_usage.ram_provisioned, 0); + let virtual_resource_provisioning = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); + assert_eq!(virtual_resource_provisioning.cpus_provisioned, 0); + assert_eq!(virtual_resource_provisioning.ram_provisioned, 0); } #[nexus_test] diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index b96760c7d90..4d943fd9936 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -321,7 +321,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + let usage = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); assert_eq!( usage.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 @@ -348,7 +351,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { assert_eq!(snapshot.disk_id, base_disk.identity.id); assert_eq!(snapshot.size, base_disk.size); - let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + let usage = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); assert_eq!( usage.virtual_disk_bytes_provisioned, 2 * disk_size.to_bytes() as i64 @@ -380,7 +386,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .parsed_body() .unwrap(); - let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + let usage = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); assert_eq!( usage.virtual_disk_bytes_provisioned, 3 * disk_size.to_bytes() as i64 @@ -401,7 +410,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + let usage = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); assert_eq!( usage.virtual_disk_bytes_provisioned, 2 * disk_size.to_bytes() as i64 @@ -414,7 +426,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .execute() .await .expect("failed to delete disk"); - let usage = datastore.resource_usage_get(&opctx, project_id).await.unwrap(); + let usage = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); assert_eq!( usage.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 @@ -427,7 +442,10 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .execute() .await .expect("failed to delete disk"); - let usage = datastore.resource_usage_get(&opctx, 
project_id).await.unwrap(); + let usage = datastore + .virtual_resource_provisioning_get(&opctx, project_id) + .await + .unwrap(); assert_eq!(usage.virtual_disk_bytes_provisioned, 0); } From eaf9fb39150f0d6eb8104860db2f4b0d08bf3686 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Dec 2022 08:54:37 -0500 Subject: [PATCH 35/80] usage -> Provision, update openapi --- .../virtual_resource_provisioning_update.rs | 2 +- nexus/src/app/disk.rs | 2 +- nexus/src/app/instance.rs | 2 +- nexus/src/app/project.rs | 6 +- nexus/src/app/sagas/disk_create.rs | 6 +- nexus/src/app/sagas/disk_delete.rs | 4 +- nexus/src/app/sagas/instance_create.rs | 4 +- nexus/src/app/sagas/snapshot_create.rs | 4 +- nexus/src/db/datastore/project.rs | 2 +- .../virtual_resource_provisioning.rs | 58 +- .../virtual_resource_provisioning_update.rs | 4 +- nexus/tests/integration_tests/snapshots.rs | 24 +- openapi/nexus.json | 729 ++++++++++++++++-- 13 files changed, 743 insertions(+), 104 deletions(-) diff --git a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs index 18fcf086e80..7ddce271be7 100644 --- a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs +++ b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Describes the resource usage update CTE +//! Describes the resource provisioning update CTE use crate::schema::organization; use crate::schema::silo; diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 4dd0b439252..8cea3bbd193 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -518,7 +518,7 @@ impl super::Nexus { project.id(), -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) })?, ) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index c76907ee4f5..8d3cf4ec978 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -269,7 +269,7 @@ impl super::Nexus { -i64::try_from(instance.runtime_state.memory.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) })?, ) diff --git a/nexus/src/app/project.rs b/nexus/src/app/project.rs index 03f4d1ff18e..1fc79a81e3c 100644 --- a/nexus/src/app/project.rs +++ b/nexus/src/app/project.rs @@ -35,9 +35,9 @@ impl super::Nexus { .lookup_for(authz::Action::CreateChild) .await?; - // TODO: We probably want to have "project creation", "resource usage - // creation", and "default VPC creation" co-located within a saga for - // atomicity. + // TODO: We probably want to have "project creation", "resource + // provisioning creation", and "default VPC creation" co-located within + // a saga for atomicity. // // Until then, we just perform the operations sequentially. 
diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index e9c144df910..152942d492b 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -297,7 +297,7 @@ async fn sdc_account_space( i64::try_from(disk_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, @@ -322,7 +322,9 @@ async fn sdc_account_space_undo( &opctx, params.project_id, -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { - Error::internal_error(&format!("updating resource usage: {e}")) + Error::internal_error(&format!( + "updating resource provisioning: {e}" + )) })?, ) .await diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index dcb08804a36..e6f9e31cf7c 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -120,7 +120,7 @@ async fn sdd_account_space( -i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, @@ -147,7 +147,7 @@ async fn sdd_account_space_undo( i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 20948edcade..69ce207ea1f 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -836,7 +836,7 @@ async fn sic_account_resources( i64::try_from(params.create_params.memory.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, @@ -863,7 +863,7 @@ async fn sic_account_resources_undo( -i64::try_from(params.create_params.memory.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index b8fb686d151..7feb38ce75a 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -557,7 +557,7 @@ async fn ssc_account_space( i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, @@ -585,7 +585,7 @@ async fn ssc_account_space_undo( -i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { Error::internal_error(&format!( - "updating resource usage: {e}" + "updating resource provisioning: {e}" )) }) .map_err(ActionError::action_failed)?, diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index bbac606d176..c58b55224dc 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -76,7 +76,7 @@ impl DataStore { } })?; - // Create resource usage for the project. + // Create resource provisioning for the project. 
self.virtual_resource_provisioning_create_on_connection( &conn, VirtualResourceProvisioning::new( diff --git a/nexus/src/db/datastore/virtual_resource_provisioning.rs b/nexus/src/db/datastore/virtual_resource_provisioning.rs index a7c4bf63721..6011e8b7fcf 100644 --- a/nexus/src/db/datastore/virtual_resource_provisioning.rs +++ b/nexus/src/db/datastore/virtual_resource_provisioning.rs @@ -63,14 +63,17 @@ impl Producer { Self { samples: Arc::new(Mutex::new(vec![])) } } - fn append_disk_metrics(&self, usages: &Vec) { - let new_samples = usages + fn append_disk_metrics( + &self, + provisions: &Vec, + ) { + let new_samples = provisions .iter() - .map(|usage| { + .map(|provision| { Sample::new( - &CollectionTarget { id: usage.id }, + &CollectionTarget { id: provision.id }, &VirtualDiskSpaceProvisioned { - bytes_used: usage.virtual_disk_bytes_provisioned, + bytes_used: provision.virtual_disk_bytes_provisioned, }, ) }) @@ -79,19 +82,22 @@ impl Producer { self.append(new_samples); } - fn append_cpu_metrics(&self, usages: &Vec) { - let new_samples = usages + fn append_cpu_metrics( + &self, + provisions: &Vec, + ) { + let new_samples = provisions .iter() - .map(|usage| { + .map(|provision| { Sample::new( - &CollectionTarget { id: usage.id }, - &CpusProvisioned { cpus: usage.cpus_provisioned }, + &CollectionTarget { id: provision.id }, + &CpusProvisioned { cpus: provision.cpus_provisioned }, ) }) - .chain(usages.iter().map(|usage| { + .chain(provisions.iter().map(|provision| { Sample::new( - &CollectionTarget { id: usage.id }, - &RamProvisioned { bytes: usage.ram_provisioned }, + &CollectionTarget { id: provision.id }, + &RamProvisioned { bytes: provision.ram_provisioned }, ) })) .collect::>(); @@ -144,7 +150,7 @@ impl DataStore { { use db::schema::virtual_resource_provisioning::dsl; - let usages: Vec = + let provisions: Vec = diesel::insert_into(dsl::virtual_resource_provisioning) .values(virtual_resource_provisioning) .on_conflict_do_nothing() @@ -157,9 +163,10 @@ impl DataStore { ) })?; self.virtual_resource_provisioning_producer - .append_disk_metrics(&usages); - self.virtual_resource_provisioning_producer.append_cpu_metrics(&usages); - Ok(usages) + .append_disk_metrics(&provisions); + self.virtual_resource_provisioning_producer + .append_cpu_metrics(&provisions); + Ok(provisions) } pub async fn virtual_resource_provisioning_get( @@ -204,14 +211,14 @@ impl DataStore { Ok(()) } - /// Transitively updates all provisioned disk usage from project -> fleet. + /// Transitively updates all provisioned disk provisions from project -> fleet. pub async fn virtual_resource_provisioning_update_disk( &self, opctx: &OpContext, project_id: Uuid, disk_byte_diff: i64, ) -> Result, Error> { - let usages = VirtualResourceProvisioningUpdate::new_update_disk( + let provisions = VirtualResourceProvisioningUpdate::new_update_disk( project_id, disk_byte_diff, ) @@ -219,11 +226,11 @@ impl DataStore { .await .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; self.virtual_resource_provisioning_producer - .append_disk_metrics(&usages); - Ok(usages) + .append_disk_metrics(&provisions); + Ok(provisions) } - /// Transitively updates all CPU/RAM usage from project -> fleet. + /// Transitively updates all CPU/RAM provisions from project -> fleet. 
pub async fn virtual_resource_provisioning_update_cpus_and_ram( &self, opctx: &OpContext, @@ -231,7 +238,7 @@ impl DataStore { cpus_diff: i64, ram_diff: i64, ) -> Result, Error> { - let usages = + let provisions = VirtualResourceProvisioningUpdate::new_update_cpus_and_ram( project_id, cpus_diff, ram_diff, ) @@ -240,7 +247,8 @@ impl DataStore { .map_err(|e| { public_error_from_diesel_pool(e, ErrorHandler::Server) })?; - self.virtual_resource_provisioning_producer.append_cpu_metrics(&usages); - Ok(usages) + self.virtual_resource_provisioning_producer + .append_cpu_metrics(&provisions); + Ok(provisions) } } diff --git a/nexus/src/db/queries/virtual_resource_provisioning_update.rs b/nexus/src/db/queries/virtual_resource_provisioning_update.rs index 740cd1e935a..3c6b4120aae 100644 --- a/nexus/src/db/queries/virtual_resource_provisioning_update.rs +++ b/nexus/src/db/queries/virtual_resource_provisioning_update.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Implementation of queries for updating resource usage info. +//! Implementation of queries for updating resource provisioning info. use crate::db::alias::ExpressionAlias; use crate::db::model::VirtualResourceProvisioning; @@ -127,7 +127,7 @@ impl AllCollections { } } -/// Constructs a CTE for updating resource usage information in all +/// Constructs a CTE for updating resource provisioning information in all /// collections for a particular object. #[derive(QueryId)] pub struct VirtualResourceProvisioningUpdate { diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 561e4ec452e..a8a884d66d0 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -323,12 +323,12 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let usage = datastore + let provision = datastore .virtual_resource_provisioning_get(&opctx, project_id) .await .unwrap(); assert_eq!( - usage.virtual_disk_bytes_provisioned, + provision.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); @@ -353,12 +353,12 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { assert_eq!(snapshot.disk_id, base_disk.identity.id); assert_eq!(snapshot.size, base_disk.size); - let usage = datastore + let provision = datastore .virtual_resource_provisioning_get(&opctx, project_id) .await .unwrap(); assert_eq!( - usage.virtual_disk_bytes_provisioned, + provision.virtual_disk_bytes_provisioned, 2 * disk_size.to_bytes() as i64 ); @@ -388,12 +388,12 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .parsed_body() .unwrap(); - let usage = datastore + let provision = datastore .virtual_resource_provisioning_get(&opctx, project_id) .await .unwrap(); assert_eq!( - usage.virtual_disk_bytes_provisioned, + provision.virtual_disk_bytes_provisioned, 3 * disk_size.to_bytes() as i64 ); @@ -412,12 +412,12 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let usage = datastore + let provision = datastore .virtual_resource_provisioning_get(&opctx, project_id) .await .unwrap(); assert_eq!( - usage.virtual_disk_bytes_provisioned, + provision.virtual_disk_bytes_provisioned, 2 * disk_size.to_bytes() as i64 ); @@ -428,12 +428,12 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .execute() .await 
.expect("failed to delete disk"); - let usage = datastore + let provision = datastore .virtual_resource_provisioning_get(&opctx, project_id) .await .unwrap(); assert_eq!( - usage.virtual_disk_bytes_provisioned, + provision.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); @@ -444,11 +444,11 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .execute() .await .expect("failed to delete disk"); - let usage = datastore + let provision = datastore .virtual_resource_provisioning_get(&opctx, project_id) .await .unwrap(); - assert_eq!(usage.virtual_disk_bytes_provisioned, 0); + assert_eq!(provision.virtual_disk_bytes_provisioned, 0); } // Test the various ways Nexus can reject a disk created from a snapshot diff --git a/openapi/nexus.json b/openapi/nexus.json index 42d93b277a3..32db3e5baef 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -530,6 +530,66 @@ } } }, + "/groups": { + "get": { + "tags": [ + "silos" + ], + "summary": "List groups", + "operationId": "group_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + }, + "style": "form" + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": true + } + }, "/login": { "post": { "tags": [ @@ -559,6 +619,57 @@ } } }, + "/login/{silo_name}/local": { + "post": { + "tags": [ + "login" + ], + "summary": "Authenticate a user (i.e., log in) via username and password", + "operationId": "login_local", + "parameters": [ + { + "in": "path", + "name": "silo_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UsernamePasswordCredentials" + } + } + }, + "required": true + }, + "responses": { + "303": { + "description": "redirect (see other)", + "headers": { + "location": { + "description": "HTTP \"Location\" header", + "style": "simple", + "required": true, + "schema": { + "type": "string" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/login/{silo_name}/saml/{provider_name}": { "get": { "tags": [ @@ -613,8 +724,7 @@ "tags": [ "login" ], - "summary": "Authenticate a user", - "description": "Either receive a username and password, or some sort of identity provider data (like a SAMLResponse). 
Use these to set the user's session cookie.", + "summary": "Authenticate a user (i.e., log in) via SAML", "operationId": "login_saml", "parameters": [ { @@ -2916,6 +3026,55 @@ } } }, + "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/serial-console/stream": { + "get": { + "tags": [ + "instances" + ], + "summary": "Connect to an instance's serial console", + "operationId": "instance_serial_console_stream", + "parameters": [ + { + "in": "path", + "name": "instance_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "organization_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "project_name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "responses": { + "default": { + "description": "", + "content": { + "*/*": { + "schema": {} + } + } + } + }, + "x-dropshot-websocket": {} + } + }, "/organizations/{organization_name}/projects/{project_name}/instances/{instance_name}/start": { "post": { "tags": [ @@ -5156,6 +5315,66 @@ } } }, + "/session/me/groups": { + "get": { + "tags": [ + "hidden" + ], + "summary": "Fetch the silo groups the current user belongs to", + "operationId": "session_me_groups", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + }, + "style": "form" + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GroupResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": true + } + }, "/session/me/sshkeys": { "get": { "tags": [ @@ -6847,13 +7066,14 @@ "x-dropshot-pagination": true } }, - "/system/silos/{silo_name}/identity-providers/saml": { + "/system/silos/{silo_name}/identity-providers/local/users": { "post": { "tags": [ "system" ], - "summary": "Create a SAML IDP", - "operationId": "saml_identity_provider_create", + "summary": "Create a user", + "description": "Users can only be created in Silos with `provision_type` == `Fixed`. 
Otherwise, Silo users are just-in-time (JIT) provisioned when a user first logs in using an external Identity Provider.", + "operationId": "local_idp_user_create", "parameters": [ { "in": "path", @@ -6870,7 +7090,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SamlIdentityProviderCreate" + "$ref": "#/components/schemas/UserCreate" } } }, @@ -6882,7 +7102,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SamlIdentityProvider" + "$ref": "#/components/schemas/User" } } } @@ -6896,18 +7116,17 @@ } } }, - "/system/silos/{silo_name}/identity-providers/saml/{provider_name}": { - "get": { + "/system/silos/{silo_name}/identity-providers/local/users/{user_id}": { + "delete": { "tags": [ "system" ], - "summary": "Fetch a SAML IDP", - "operationId": "saml_identity_provider_view", + "operationId": "local_idp_user_delete", "parameters": [ { "in": "path", - "name": "provider_name", - "description": "The SAML identity provider's name", + "name": "silo_name", + "description": "The silo's unique name.", "required": true, "schema": { "$ref": "#/components/schemas/Name" @@ -6916,25 +7135,19 @@ }, { "in": "path", - "name": "silo_name", - "description": "The silo's unique name.", + "name": "user_id", + "description": "The user's internal id", "required": true, "schema": { - "$ref": "#/components/schemas/Name" + "type": "string", + "format": "uuid" }, "style": "simple" } ], "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SamlIdentityProvider" - } - } - } + "204": { + "description": "successful deletion" }, "4XX": { "$ref": "#/components/responses/Error" @@ -6945,13 +7158,14 @@ } } }, - "/system/silos/{silo_name}/policy": { - "get": { + "/system/silos/{silo_name}/identity-providers/local/users/{user_id}/set-password": { + "post": { "tags": [ "system" ], - "summary": "Fetch a silo's IAM policy", - "operationId": "silo_policy_view", + "summary": "Set or invalidate a user's password", + "description": "Passwords can only be updated for users in Silos with identity mode `LocalOnly`.", + "operationId": "local_idp_user_set_password", "parameters": [ { "in": "path", @@ -6962,15 +7176,166 @@ "$ref": "#/components/schemas/Name" }, "style": "simple" + }, + { + "in": "path", + "name": "user_id", + "description": "The user's internal id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + }, + "style": "simple" } ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SiloRolePolicy" + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserPassword" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/system/silos/{silo_name}/identity-providers/saml": { + "post": { + "tags": [ + "system" + ], + "summary": "Create a SAML IDP", + "operationId": "saml_identity_provider_create", + "parameters": [ + { + "in": "path", + "name": "silo_name", + "description": "The silo's unique name.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/SamlIdentityProviderCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamlIdentityProvider" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/system/silos/{silo_name}/identity-providers/saml/{provider_name}": { + "get": { + "tags": [ + "system" + ], + "summary": "Fetch a SAML IDP", + "operationId": "saml_identity_provider_view", + "parameters": [ + { + "in": "path", + "name": "provider_name", + "description": "The SAML identity provider's name", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + "name": "silo_name", + "description": "The silo's unique name.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamlIdentityProvider" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/system/silos/{silo_name}/policy": { + "get": { + "tags": [ + "system" + ], + "summary": "Fetch a silo's IAM policy", + "operationId": "silo_policy_view", + "parameters": [ + { + "in": "path", + "name": "silo_name", + "description": "The silo's unique name.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SiloRolePolicy" } } } @@ -7031,6 +7396,125 @@ } } }, + "/system/silos/{silo_name}/users/all": { + "get": { + "tags": [ + "system" + ], + "summary": "List users in a specific Silo", + "operationId": "silo_users_list", + "parameters": [ + { + "in": "path", + "name": "silo_name", + "description": "The silo's unique name.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + }, + "style": "form" + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + }, + "style": "form" + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UserResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": true + } + }, + "/system/silos/{silo_name}/users/id/{user_id}": { + "get": { + "tags": [ + "system" + ], + "operationId": "silo_user_view", + "parameters": [ + { + "in": "path", + "name": "silo_name", + "description": "The silo's unique name.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + }, + "style": "simple" + }, + { + "in": "path", + 
"name": "user_id", + "description": "The user's internal id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + }, + "style": "simple" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/User" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/system/updates/refresh": { "post": { "tags": [ @@ -8475,6 +8959,51 @@ "items" ] }, + "Group": { + "description": "Client view of a [`Group`]", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the group", + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this group belongs", + "type": "string", + "format": "uuid" + } + }, + "required": [ + "display_name", + "id", + "silo_id" + ] + }, + "GroupResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "Histogramdouble": { "description": "A simple type for managing a histogram metric.\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.\n\nExample ------- ```rust use oximeter::histogram::{BinRange, Histogram};\n\nlet edges = [0i64, 10, 20]; let mut hist = Histogram::new(&edges).unwrap(); assert_eq!(hist.n_bins(), 4); // One additional bin for the range (20..) assert_eq!(hist.n_samples(), 0); hist.sample(4); hist.sample(100); assert_eq!(hist.n_samples(), 2);\n\nlet data = hist.iter().collect::>(); assert_eq!(data[0].range, BinRange::range(i64::MIN, 0)); // An additional bin for `..0` assert_eq!(data[0].count, 0); // Nothing is in this bin\n\nassert_eq!(data[1].range, BinRange::range(0, 10)); // The range `0..10` assert_eq!(data[1].count, 1); // 4 is sampled into this bin ```\n\nNotes -----\n\nHistograms may be constructed either from their left bin edges, or from a sequence of ranges. In either case, the left-most bin may be converted upon construction. In particular, if the left-most value is not equal to the minimum of the support, a new bin will be added from the minimum to that provided value. If the left-most value _is_ the support's minimum, because the provided bin was unbounded below, such as `(..0)`, then that bin will be converted into one bounded below, `(MIN..0)` in this case.\n\nThe short of this is that, most of the time, it shouldn't matter. If one specifies the extremes of the support as their bins, be aware that the left-most may be converted from a `BinRange::RangeTo` into a `BinRange::Range`. In other words, the first bin of a histogram is _always_ a `Bin::Range` or a `Bin::RangeFrom` after construction. 
In fact, every bin is one of those variants, the `BinRange::RangeTo` is only provided as a convenience during construction.", "type": "object", @@ -9463,7 +9992,7 @@ "title": "An IPv4 subnet", "description": "An IPv4 subnet, including prefix and subnet mask", "type": "string", - "pattern": "^(10\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\/([8-9]|1[0-9]|2[0-9]|3[0-2])|172\\.(1[6-9]|2[0-9]|3[0-1])\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\/(1[2-9]|2[0-9]|3[0-2])|192\\.168\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\/(1[6-9]|2[0-9]|3[0-2]))$" + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([8-9]|1[0-9]|2[0-9]|3[0-2])$" }, "Ipv4Range": { "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -9877,6 +10406,12 @@ } } }, + "Password": { + "title": "A password used to authenticate a user", + "description": "Passwords may be subject to additional constraints.", + "type": "string", + "maxLength": 512 + }, "Project": { "description": "Client view of a [`Project`]", "type": "object", @@ -11280,11 +11815,17 @@ "id": { "type": "string", "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this user belongs", + "type": "string", + "format": "uuid" } }, "required": [ "display_name", - "id" + "id", + "silo_id" ] }, "UserBuiltin": { @@ -11348,6 +11889,78 @@ "items" ] }, + "UserCreate": { + "description": "Create-time parameters for a [`User`](crate::external_api::views::User)", + "type": "object", + "properties": { + "external_id": { + "description": "username used to log in", + "allOf": [ + { + "$ref": "#/components/schemas/UserId" + } + ] + }, + "password": { + "description": "password used to log in", + "allOf": [ + { + "$ref": "#/components/schemas/UserPassword" + } + ] + } + }, + "required": [ + "external_id", + "password" + ] + }, + "UserId": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID though they may contain a UUID.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z][a-z0-9-]*[a-zA-Z0-9]$", + "maxLength": 63 + }, + "UserPassword": { + "description": "Parameters for setting a user's password", + "oneOf": [ + { + "description": "Sets the user's password to the provided value", + "type": "object", + "properties": { + "details": { + "$ref": "#/components/schemas/Password" + }, + "user_password_value": { + "type": "string", + "enum": [ + "password" + ] + } + }, + "required": [ + "details", + "user_password_value" + ] + }, + { + "description": "Invalidates any current password (disabling password authentication)", + "type": "object", + "properties": { + "user_password_value": { + "type": "string", + "enum": [ + "invalid_password" + ] + } + }, + "required": [ + "user_password_value" + ] + } + ] + }, "UserResultsPage": { "description": "A single page of results", "type": "object", @@ -11369,6 +11982,22 @@ "items" ] }, + "UsernamePasswordCredentials": { + "description": "Credentials for local user login", + "type": "object", + "properties": { + "password": { + "$ref": "#/components/schemas/Password" + }, + "username": { + "$ref": "#/components/schemas/UserId" + } + }, + "required": [ + "password", + "username" + ] + }, "Vpc": { "description": "Client view of a [`Vpc`]", "type": "object", @@ -12226,6 +12855,18 @@ } } }, + "IdSortMode": { + "description": "Supported set of sort modes for scanning by id only.\n\nCurrently, we only support scanning in ascending order.", + "oneOf": [ + { + "description": "sort in increasing order of \"id\"", + "type": "string", + "enum": [ + "id_ascending" + ] + } + ] + }, "NameOrIdSortMode": { "description": "Supported set of sort modes for scanning by name or id", "oneOf": [ @@ -12275,18 +12916,6 @@ "write_bytes" ] }, - "IdSortMode": { - "description": "Supported set of sort modes for scanning by id only.\n\nCurrently, we only support scanning in ascending order.", - "oneOf": [ - { - "description": "sort in increasing order of \"id\"", - "type": "string", - "enum": [ - "id_ascending" - ] - } - ] - }, "SystemMetricName": { "type": "string", "enum": [ From acd4d773e283192d4d8e55cb5a0d008a2f655da6 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Dec 2022 10:02:10 -0500 Subject: [PATCH 36/80] link to query --- .../src/queries/virtual_resource_provisioning_update.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs index 7ddce271be7..992322fd800 100644 --- a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs +++ b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs @@ -3,6 +3,9 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Describes the resource provisioning update CTE +//! +//! Refer to +//! for the construction of this query. 
use crate::schema::organization; use crate::schema::silo; From ec48ea097c1147114aeee03a00e8e1336251d3b5 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 7 Dec 2022 11:54:49 -0500 Subject: [PATCH 37/80] VERY WIP - MOVE ACCOUNTING FOR DISKS/INSTANCES CLOSER TO CREATION/DELETION --- .../src/virtual_resource_provisioning.rs | 5 ++ nexus/src/app/sagas/instance_create.rs | 3 +- nexus/src/db/datastore/instance.rs | 59 +++++++++++++++---- nexus/src/db/queries/network_interface.rs | 12 ++-- 4 files changed, 59 insertions(+), 20 deletions(-) diff --git a/nexus/db-model/src/virtual_resource_provisioning.rs b/nexus/db-model/src/virtual_resource_provisioning.rs index 55591c6fc30..5bc4c3bc146 100644 --- a/nexus/db-model/src/virtual_resource_provisioning.rs +++ b/nexus/db-model/src/virtual_resource_provisioning.rs @@ -7,6 +7,9 @@ use uuid::Uuid; #[derive(Debug)] pub enum CollectionType { + Instance, + Disk, + Project, Organization, Silo, @@ -16,6 +19,8 @@ pub enum CollectionType { impl std::fmt::Display for CollectionType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { + CollectionType::Instance => write!(f, "instance"), + CollectionType::Disk => write!(f, "disk"), CollectionType::Project => write!(f, "project"), CollectionType::Organization => write!(f, "organization"), CollectionType::Silo => write!(f, "silo"), diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 69ce207ea1f..f187f18807e 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -887,6 +887,7 @@ async fn sic_create_instance_record( ) -> Result { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; + let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); let sled_uuid = sagactx.lookup::("server_id")?; let instance_id = sagactx.lookup::("instance_id")?; let propolis_uuid = sagactx.lookup::("propolis_id")?; @@ -918,7 +919,7 @@ async fn sic_create_instance_record( let instance = osagactx .datastore() - .project_create_instance(new_instance) + .project_create_instance(&opctx, new_instance) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/db/datastore/instance.rs b/nexus/src/db/datastore/instance.rs index 27d330eb50a..5cd2d6340fd 100644 --- a/nexus/src/db/datastore/instance.rs +++ b/nexus/src/db/datastore/instance.rs @@ -13,14 +13,18 @@ use crate::db::collection_detach_many::DatastoreDetachManyTarget; use crate::db::collection_detach_many::DetachManyError; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; +use crate::db::error::TransactionError; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; +use crate::db::model::CollectionType; use crate::db::model::Instance; use crate::db::model::InstanceRuntimeState; use crate::db::model::Name; +use crate::db::model::VirtualResourceProvisioning; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; +use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -60,27 +64,56 @@ impl DataStore { // what this function does under the hood). 
pub async fn project_create_instance( &self, + opctx: &OpContext, instance: Instance, ) -> CreateResult { use db::schema::instance::dsl; let gen = instance.runtime().gen; let name = instance.name().clone(); - let instance: Instance = diesel::insert_into(dsl::instance) - .values(instance) - .on_conflict(dsl::id) - .do_nothing() - .returning(Instance::as_returning()) - .get_result_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Instance, - name.as_str(), + + let instance = self.pool_authorized(opctx) + .await? + .transaction_async(|conn| async move { + // TODO: Use "collection_insert" to "insert_resource" into a + // Project. Otherwise, the project could be concurrently + // deleted during instance creation! + let instance: Instance = diesel::insert_into(dsl::instance) + .values(instance) + .on_conflict(dsl::id) + .do_nothing() + .returning(Instance::as_returning()) + .get_result_async(&conn) + .await + .map_err(|e| { + public_error_from_diesel_pool( + async_bb8_diesel::PoolError::Connection(e), + ErrorHandler::Conflict( + ResourceType::Instance, + name.as_str(), + ), + ) + })?; + + // Create resource provisioning for the instance. + // TODO: Create with contents??? + self.virtual_resource_provisioning_create_on_connection( + &conn, + VirtualResourceProvisioning::new( + instance.id(), + CollectionType::Instance, ), ) + .await?; + + Ok(instance) + }) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => e, + TransactionError::Pool(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } })?; bail_unless!( diff --git a/nexus/src/db/queries/network_interface.rs b/nexus/src/db/queries/network_interface.rs index 825d3068494..621ec83b5b4 100644 --- a/nexus/src/db/queries/network_interface.rs +++ b/nexus/src/db/queries/network_interface.rs @@ -1534,7 +1534,7 @@ mod tests { // Add an instance. We'll use this to verify that the instance must be // stopped to add or delete interfaces. 
- async fn create_instance(db_datastore: &DataStore) -> Instance { + async fn create_instance(opctx: &OpContext, db_datastore: &DataStore) -> Instance { let instance_id = Uuid::new_v4(); let project_id = "f89892a0-58e0-60c8-a164-a82d0bd29ff4".parse().unwrap(); @@ -1574,13 +1574,13 @@ mod tests { let instance = Instance::new(instance_id, project_id, ¶ms, runtime.into()); db_datastore - .project_create_instance(instance) + .project_create_instance(opctx, instance) .await .expect("Failed to create new instance record") } - async fn create_stopped_instance(db_datastore: &DataStore) -> Instance { - let instance = create_instance(db_datastore).await; + async fn create_stopped_instance(opctx: &OpContext, db_datastore: &DataStore) -> Instance { + let instance = create_instance(opctx, db_datastore).await; instance_set_state( db_datastore, instance, @@ -1712,7 +1712,7 @@ mod tests { ) -> Instance { instance_set_state( &self.db_datastore, - create_instance(&self.db_datastore).await, + create_instance(&self.opctx, &self.db_datastore).await, state, ) .await @@ -2097,7 +2097,7 @@ mod tests { } // Next one should fail - let instance = create_stopped_instance(&context.db_datastore).await; + let instance = create_stopped_instance(&context.opctx, &context.db_datastore).await; let interface = IncompleteNetworkInterface::new( Uuid::new_v4(), instance.id(), From 26539113ad5ee18b67cada9f2c35b0a82886e8b4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 9 Dec 2022 11:47:27 -0500 Subject: [PATCH 38/80] fmt --- nexus/src/db/datastore/instance.rs | 3 ++- nexus/src/db/queries/network_interface.rs | 14 +++++++++++--- nexus/src/external_api/http_entrypoints.rs | 8 ++++++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/nexus/src/db/datastore/instance.rs b/nexus/src/db/datastore/instance.rs index 5cd2d6340fd..a408cd6a9aa 100644 --- a/nexus/src/db/datastore/instance.rs +++ b/nexus/src/db/datastore/instance.rs @@ -72,7 +72,8 @@ impl DataStore { let gen = instance.runtime().gen; let name = instance.name().clone(); - let instance = self.pool_authorized(opctx) + let instance = self + .pool_authorized(opctx) .await? .transaction_async(|conn| async move { // TODO: Use "collection_insert" to "insert_resource" into a diff --git a/nexus/src/db/queries/network_interface.rs b/nexus/src/db/queries/network_interface.rs index 621ec83b5b4..e7ae6efa4d2 100644 --- a/nexus/src/db/queries/network_interface.rs +++ b/nexus/src/db/queries/network_interface.rs @@ -1534,7 +1534,10 @@ mod tests { // Add an instance. We'll use this to verify that the instance must be // stopped to add or delete interfaces. 
- async fn create_instance(opctx: &OpContext, db_datastore: &DataStore) -> Instance { + async fn create_instance( + opctx: &OpContext, + db_datastore: &DataStore, + ) -> Instance { let instance_id = Uuid::new_v4(); let project_id = "f89892a0-58e0-60c8-a164-a82d0bd29ff4".parse().unwrap(); @@ -1579,7 +1582,10 @@ mod tests { .expect("Failed to create new instance record") } - async fn create_stopped_instance(opctx: &OpContext, db_datastore: &DataStore) -> Instance { + async fn create_stopped_instance( + opctx: &OpContext, + db_datastore: &DataStore, + ) -> Instance { let instance = create_instance(opctx, db_datastore).await; instance_set_state( db_datastore, @@ -2097,7 +2103,9 @@ mod tests { } // Next one should fail - let instance = create_stopped_instance(&context.opctx, &context.db_datastore).await; + let instance = + create_stopped_instance(&context.opctx, &context.db_datastore) + .await; let interface = IncompleteNetworkInterface::new( Uuid::new_v4(), instance.id(), diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 0c4d2602580..3c15a6cf77c 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -2322,7 +2322,9 @@ async fn instance_delete_v1( params::InstanceSelector::new(path.instance, &query.selector), ) .await?; - nexus.project_destroy_instance(&opctx, &authz_instance, &instance).await?; + nexus + .project_destroy_instance(&opctx, &authz_instance, &instance) + .await?; Ok(HttpResponseDeleted()) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -2358,7 +2360,9 @@ async fn instance_delete( }, ) .await?; - nexus.project_destroy_instance(&opctx, &authz_instance, &instance).await?; + nexus + .project_destroy_instance(&opctx, &authz_instance, &instance) + .await?; Ok(HttpResponseDeleted()) }; apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await From c8a20316dc37a0b73bdf379ba08c492e73eda9e8 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 13 Dec 2022 10:30:52 -0500 Subject: [PATCH 39/80] Distinguish between 'resource' and 'collection'. Add CTE to make updates idempotent --- common/src/sql/dbinit.sql | 22 ++ nexus/db-model/src/lib.rs | 2 + .../virtual_resource_provisioning_update.rs | 7 + nexus/db-model/src/schema.rs | 10 + .../src/virtual_resource_provisioned.rs | 45 +++ .../src/virtual_resource_provisioning.rs | 23 +- nexus/src/app/disk.rs | 4 +- nexus/src/app/instance.rs | 3 +- nexus/src/app/sagas/disk_create.rs | 6 +- nexus/src/app/sagas/disk_delete.rs | 6 +- nexus/src/app/sagas/instance_create.rs | 8 +- nexus/src/app/sagas/snapshot_create.rs | 6 +- nexus/src/db/datastore/instance.rs | 61 +--- nexus/src/db/datastore/organization.rs | 4 +- nexus/src/db/datastore/project.rs | 4 +- nexus/src/db/datastore/silo.rs | 8 +- .../virtual_resource_provisioning.rs | 65 +++- .../virtual_resource_provisioning_update.rs | 285 ++++++++++++++++-- nexus/src/populate.rs | 2 +- 19 files changed, 465 insertions(+), 106 deletions(-) create mode 100644 nexus/db-model/src/virtual_resource_provisioned.rs diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 19341f01b5b..7b51ab5c524 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -162,6 +162,28 @@ CREATE TABLE omicron.public.virtual_resource_provisioning ( ram_provisioned INT8 NOT NULL ); +-- A table describing a single virtual resource which has been provisioned. 
+-- This may include: +-- - Disks +-- - Instances +-- - Snapshots +CREATE TABLE omicron.public.virtual_resource_provisioned ( + -- Should match the UUID of the corresponding collection. + id UUID PRIMARY KEY, + -- Identifies the type of the resource. + resource_type STRING(63) NOT NULL, + + -- The amount of physical disk space which has been provisioned + -- on behalf of the resource. + virtual_disk_bytes_provisioned INT8 NOT NULL, + + -- The number of CPUs provisioned. + cpus_provisioned INT8 NOT NULL, + + -- The amount of RAM provisioned. + ram_provisioned INT8 NOT NULL +); + /* * ZPools of Storage, attached to Sleds. * Typically these are backed by a single physical disk. diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index ac19174a5e2..02398e3c8a0 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -63,6 +63,7 @@ mod ssh_key; mod u16; mod update_artifact; mod user_builtin; +mod virtual_resource_provisioned; mod virtual_resource_provisioning; mod vni; mod volume; @@ -129,6 +130,7 @@ pub use snapshot::*; pub use ssh_key::*; pub use update_artifact::*; pub use user_builtin::*; +pub use virtual_resource_provisioned::*; pub use virtual_resource_provisioning::*; pub use vni::*; pub use volume::*; diff --git a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs index 992322fd800..59c99df3ca7 100644 --- a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs +++ b/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs @@ -35,6 +35,12 @@ table! { } } +table! { + do_update (update) { + update -> Bool, + } +} + diesel::allow_tables_to_appear_in_same_query!(organization, parent_org,); diesel::allow_tables_to_appear_in_same_query!(silo, parent_silo,); @@ -44,4 +50,5 @@ diesel::allow_tables_to_appear_in_same_query!( parent_silo, parent_fleet, all_collections, + do_update, ); diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 8ba724f3a40..ea7347aaddd 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -426,6 +426,16 @@ table! { } } +table! { + virtual_resource_provisioned { + id -> Uuid, + resource_type -> Text, + virtual_disk_bytes_provisioned -> Int8, + cpus_provisioned -> Int8, + ram_provisioned -> Int8, + } +} + table! { zpool (id) { id -> Uuid, diff --git a/nexus/db-model/src/virtual_resource_provisioned.rs b/nexus/db-model/src/virtual_resource_provisioned.rs new file mode 100644 index 00000000000..81d61781705 --- /dev/null +++ b/nexus/db-model/src/virtual_resource_provisioned.rs @@ -0,0 +1,45 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::schema::virtual_resource_provisioned; +use uuid::Uuid; + +#[derive(Debug)] +pub enum ResourceTypeProvisioned { + Instance, + Disk, +} + +impl std::fmt::Display for ResourceTypeProvisioned { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ResourceTypeProvisioned::Instance => write!(f, "instance"), + ResourceTypeProvisioned::Disk => write!(f, "disk"), + } + } +} + +/// Describes virtual_resource_provisioned for a resource. 
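+///
+/// (One row exists per provisioned disk, instance, or snapshot; the
+/// accounting CTE's idempotency guard keys off the presence or absence of
+/// this row.)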
+#[derive(Clone, Selectable, Queryable, Insertable, Debug)] +#[diesel(table_name = virtual_resource_provisioned)] +pub struct VirtualResourceProvisioned { + pub id: Uuid, + pub resource_type: String, + + pub virtual_disk_bytes_provisioned: i64, + pub cpus_provisioned: i64, + pub ram_provisioned: i64, +} + +impl VirtualResourceProvisioned { + pub fn new(id: Uuid, resource_type: ResourceTypeProvisioned) -> Self { + Self { + id, + resource_type: resource_type.to_string(), + virtual_disk_bytes_provisioned: 0, + cpus_provisioned: 0, + ram_provisioned: 0, + } + } +} diff --git a/nexus/db-model/src/virtual_resource_provisioning.rs b/nexus/db-model/src/virtual_resource_provisioning.rs index 5bc4c3bc146..e5f245b8f53 100644 --- a/nexus/db-model/src/virtual_resource_provisioning.rs +++ b/nexus/db-model/src/virtual_resource_provisioning.rs @@ -6,31 +6,28 @@ use crate::schema::virtual_resource_provisioning; use uuid::Uuid; #[derive(Debug)] -pub enum CollectionType { - Instance, - Disk, - +pub enum CollectionTypeProvisioned { Project, Organization, Silo, Fleet, } -impl std::fmt::Display for CollectionType { +impl std::fmt::Display for CollectionTypeProvisioned { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - CollectionType::Instance => write!(f, "instance"), - CollectionType::Disk => write!(f, "disk"), - CollectionType::Project => write!(f, "project"), - CollectionType::Organization => write!(f, "organization"), - CollectionType::Silo => write!(f, "silo"), - CollectionType::Fleet => write!(f, "fleet"), + CollectionTypeProvisioned::Project => write!(f, "project"), + CollectionTypeProvisioned::Organization => { + write!(f, "organization") + } + CollectionTypeProvisioned::Silo => write!(f, "silo"), + CollectionTypeProvisioned::Fleet => write!(f, "fleet"), } } } /// Describes virtual_resource_provisioning for a collection -#[derive(Selectable, Queryable, Insertable, Debug)] +#[derive(Clone, Selectable, Queryable, Insertable, Debug)] #[diesel(table_name = virtual_resource_provisioning)] pub struct VirtualResourceProvisioning { pub id: Uuid, @@ -42,7 +39,7 @@ pub struct VirtualResourceProvisioning { } impl VirtualResourceProvisioning { - pub fn new(id: Uuid, collection_type: CollectionType) -> Self { + pub fn new(id: Uuid, collection_type: CollectionTypeProvisioned) -> Self { Self { id, collection_type: collection_type.to_string(), diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 8cea3bbd193..035d8234e6f 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -12,6 +12,7 @@ use crate::db; use crate::db::lookup::LookupPath; use crate::db::model::Name; use crate::external_api::params; +use nexus_types::identity::Resource; use omicron_common::api::external::ByteCount; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -513,8 +514,9 @@ impl super::Nexus { // TODO: This should exist within a saga self.db_datastore - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_delete_disk( &opctx, + db_snapshot.id(), project.id(), -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { Error::internal_error(&format!( diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index b9d3659ab43..b75cfcc1235 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -299,8 +299,9 @@ impl super::Nexus { .project_delete_instance(opctx, &authz_instance) .await?; self.db_datastore - .virtual_resource_provisioning_update_cpus_and_ram( + 
.virtual_resource_provisioning_delete_instance( &opctx, + instance.id(), instance.project_id, -i64::from(instance.runtime_state.ncpus.0 .0), -i64::try_from(instance.runtime_state.memory.to_bytes()) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 152942d492b..6551e9961ee 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -291,8 +291,9 @@ async fn sdc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_insert_disk( &opctx, + disk_created.id(), params.project_id, i64::try_from(disk_created.size.to_bytes()) .map_err(|e| { @@ -318,8 +319,9 @@ async fn sdc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_delete_disk( &opctx, + disk_created.id(), params.project_id, -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { Error::internal_error(&format!( diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index e6f9e31cf7c..a79b8c54cf8 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -114,8 +114,9 @@ async fn sdd_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_delete_disk( &opctx, + deleted_disk.id(), params.project_id, -i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { @@ -141,8 +142,9 @@ async fn sdd_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_insert_disk( &opctx, + deleted_disk.id(), params.project_id, i64::try_from(deleted_disk.size.to_bytes()) .map_err(|e| { diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index c4362936e4e..3566b8ef13c 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -823,12 +823,14 @@ async fn sic_account_resources( ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; + let instance_id = sagactx.lookup::("instance_id")?; let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_cpus_and_ram( + .virtual_resource_provisioning_insert_instance( &opctx, + instance_id, params.project_id, i64::from(params.create_params.ncpus.0), i64::try_from(params.create_params.memory.to_bytes()) @@ -850,12 +852,14 @@ async fn sic_account_resources_undo( ) -> Result<(), anyhow::Error> { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; + let instance_id = sagactx.lookup::("instance_id")?; let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_cpus_and_ram( + .virtual_resource_provisioning_delete_instance( &opctx, + instance_id, params.project_id, -i64::from(params.create_params.ncpus.0), -i64::try_from(params.create_params.memory.to_bytes()) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 7feb38ce75a..1328d720e20 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ 
b/nexus/src/app/sagas/snapshot_create.rs @@ -551,8 +551,9 @@ async fn ssc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_insert_disk( &opctx, + snapshot_created.id(), params.project_id, i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { @@ -579,8 +580,9 @@ async fn ssc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_update_disk( + .virtual_resource_provisioning_delete_disk( &opctx, + snapshot_created.id(), params.project_id, -i64::try_from(snapshot_created.size.to_bytes()) .map_err(|e| { diff --git a/nexus/src/db/datastore/instance.rs b/nexus/src/db/datastore/instance.rs index a408cd6a9aa..26c0bb447d2 100644 --- a/nexus/src/db/datastore/instance.rs +++ b/nexus/src/db/datastore/instance.rs @@ -13,18 +13,14 @@ use crate::db::collection_detach_many::DatastoreDetachManyTarget; use crate::db::collection_detach_many::DetachManyError; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; -use crate::db::model::CollectionType; use crate::db::model::Instance; use crate::db::model::InstanceRuntimeState; use crate::db::model::Name; -use crate::db::model::VirtualResourceProvisioning; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; -use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -72,49 +68,24 @@ impl DataStore { let gen = instance.runtime().gen; let name = instance.name().clone(); - let instance = self - .pool_authorized(opctx) - .await? - .transaction_async(|conn| async move { - // TODO: Use "collection_insert" to "insert_resource" into a - // Project. Otherwise, the project could be concurrently - // deleted during instance creation! - let instance: Instance = diesel::insert_into(dsl::instance) - .values(instance) - .on_conflict(dsl::id) - .do_nothing() - .returning(Instance::as_returning()) - .get_result_async(&conn) - .await - .map_err(|e| { - public_error_from_diesel_pool( - async_bb8_diesel::PoolError::Connection(e), - ErrorHandler::Conflict( - ResourceType::Instance, - name.as_str(), - ), - ) - })?; - - // Create resource provisioning for the instance. - // TODO: Create with contents??? - self.virtual_resource_provisioning_create_on_connection( - &conn, - VirtualResourceProvisioning::new( - instance.id(), - CollectionType::Instance, + // TODO: Use "collection_insert" to "insert_resource" into a + // Project. Otherwise, the project could be concurrently + // deleted during instance creation! + let instance: Instance = diesel::insert_into(dsl::instance) + .values(instance) + .on_conflict(dsl::id) + .do_nothing() + .returning(Instance::as_returning()) + .get_result_async(self.pool_authorized(opctx).await?) 
+ .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict( + ResourceType::Instance, + name.as_str(), ), ) - .await?; - - Ok(instance) - }) - .await - .map_err(|e| match e { - TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) - } })?; bail_unless!( diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index 00395a79d48..298b99f5e26 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -15,7 +15,7 @@ use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::identity::Resource; -use crate::db::model::CollectionType; +use crate::db::model::CollectionTypeProvisioned; use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::OrganizationUpdate; @@ -88,7 +88,7 @@ impl DataStore { &conn, VirtualResourceProvisioning::new( org.id(), - CollectionType::Organization, + CollectionTypeProvisioned::Organization, ), ) .await?; diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index c58b55224dc..36adf9d394c 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -14,7 +14,7 @@ use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::identity::Resource; -use crate::db::model::CollectionType; +use crate::db::model::CollectionTypeProvisioned; use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::Project; @@ -81,7 +81,7 @@ impl DataStore { &conn, VirtualResourceProvisioning::new( project.id(), - CollectionType::Project, + CollectionTypeProvisioned::Project, ), ) .await?; diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 9d2e5d14b30..18cf45b5c13 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -15,7 +15,7 @@ use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::silo::DEFAULT_SILO; use crate::db::identity::Resource; -use crate::db::model::CollectionType; +use crate::db::model::CollectionTypeProvisioned; use crate::db::model::Name; use crate::db::model::Silo; use crate::db::model::VirtualResourceProvisioning; @@ -60,7 +60,7 @@ impl DataStore { opctx, VirtualResourceProvisioning::new( DEFAULT_SILO.id(), - CollectionType::Silo, + CollectionTypeProvisioned::Silo, ), ) .await?; @@ -154,7 +154,7 @@ impl DataStore { diesel::insert_into(dsl::virtual_resource_provisioning) .values(VirtualResourceProvisioning::new( silo.id(), - CollectionType::Silo, + CollectionTypeProvisioned::Silo, )) .execute_async(&conn) .await?; @@ -163,7 +163,7 @@ impl DataStore { &conn, VirtualResourceProvisioning::new( DEFAULT_SILO.id(), - CollectionType::Silo, + CollectionTypeProvisioned::Silo, ), ) .await?; diff --git a/nexus/src/db/datastore/virtual_resource_provisioning.rs b/nexus/src/db/datastore/virtual_resource_provisioning.rs index 6011e8b7fcf..cf4e5c5b055 100644 --- a/nexus/src/db/datastore/virtual_resource_provisioning.rs +++ b/nexus/src/db/datastore/virtual_resource_provisioning.rs @@ -211,16 +211,47 @@ impl DataStore { Ok(()) } + // TODO: These could 100% act on model types: + // - Would help with identifying UUID + // - Would help with project ID lookup + // - Would help with calculating resource usage + // + // I think we just need 
to validate that the model exists when we make these + // calls? Maybe it could be an optional helper? + /// Transitively updates all provisioned disk provisions from project -> fleet. - pub async fn virtual_resource_provisioning_update_disk( + pub async fn virtual_resource_provisioning_insert_disk( &self, opctx: &OpContext, + id: Uuid, project_id: Uuid, disk_byte_diff: i64, ) -> Result, Error> { - let provisions = VirtualResourceProvisioningUpdate::new_update_disk( + let provisions = VirtualResourceProvisioningUpdate::new_insert_disk( + id, + disk_byte_diff, project_id, + ) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + self.virtual_resource_provisioning_producer + .append_disk_metrics(&provisions); + Ok(provisions) + } + + /// Transitively updates all provisioned disk provisions from project -> fleet. + pub async fn virtual_resource_provisioning_delete_disk( + &self, + opctx: &OpContext, + id: Uuid, + project_id: Uuid, + disk_byte_diff: i64, + ) -> Result, Error> { + let provisions = VirtualResourceProvisioningUpdate::new_delete_disk( + id, disk_byte_diff, + project_id, ) .get_results_async(self.pool_authorized(opctx).await?) .await @@ -231,16 +262,40 @@ impl DataStore { } /// Transitively updates all CPU/RAM provisions from project -> fleet. - pub async fn virtual_resource_provisioning_update_cpus_and_ram( + pub async fn virtual_resource_provisioning_insert_instance( &self, opctx: &OpContext, + id: Uuid, + project_id: Uuid, + cpus_diff: i64, + ram_diff: i64, + ) -> Result, Error> { + let provisions = + VirtualResourceProvisioningUpdate::new_insert_instance( + id, cpus_diff, ram_diff, project_id, + ) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool(e, ErrorHandler::Server) + })?; + self.virtual_resource_provisioning_producer + .append_cpu_metrics(&provisions); + Ok(provisions) + } + + /// Transitively updates all CPU/RAM provisions from project -> fleet. + pub async fn virtual_resource_provisioning_delete_instance( + &self, + opctx: &OpContext, + id: Uuid, project_id: Uuid, cpus_diff: i64, ram_diff: i64, ) -> Result, Error> { let provisions = - VirtualResourceProvisioningUpdate::new_update_cpus_and_ram( - project_id, cpus_diff, ram_diff, + VirtualResourceProvisioningUpdate::new_delete_instance( + id, cpus_diff, ram_diff, project_id, ) .get_results_async(self.pool_authorized(opctx).await?) .await diff --git a/nexus/src/db/queries/virtual_resource_provisioning_update.rs b/nexus/src/db/queries/virtual_resource_provisioning_update.rs index 3c6b4120aae..41a0cd9a146 100644 --- a/nexus/src/db/queries/virtual_resource_provisioning_update.rs +++ b/nexus/src/db/queries/virtual_resource_provisioning_update.rs @@ -5,19 +5,22 @@ //! Implementation of queries for updating resource provisioning info. 
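Before the implementation, a note on the overall shape of this query: the CTE
chains the collection-hierarchy subqueries (parent_org, parent_silo,
parent_fleet, all_collections), a do_update guard that tests whether the
per-resource row already exists, an unreferenced arm that inserts or deletes
that row, and a final UPDATE that applies the deltas to every ancestor
collection only if the guard passed. The following self-contained sketch
(plain Rust with hypothetical names, not Omicron or Diesel code) models that
logic in memory, to show why a replayed saga node cannot double-count:

use std::collections::{HashMap, HashSet};

struct Accounting {
    // One entry per provisioned resource; stands in for the
    // virtual_resource_provisioned table.
    resources: HashSet<u64>,
    // Aggregated usage per collection; stands in for the
    // virtual_resource_provisioning rows.
    disk_bytes: HashMap<&'static str, i64>,
}

impl Accounting {
    fn new() -> Self {
        Self { resources: HashSet::new(), disk_bytes: HashMap::new() }
    }

    // Mirrors new_insert_disk: `insert` returns false if the id was already
    // present, playing the role of the do_update guard.
    fn insert_disk(&mut self, id: u64, bytes: i64, collections: &[&'static str]) {
        if self.resources.insert(id) {
            for &c in collections {
                *self.disk_bytes.entry(c).or_insert(0) += bytes;
            }
        }
    }

    // Mirrors new_delete_disk: the deltas apply only if the row existed.
    fn delete_disk(&mut self, id: u64, bytes: i64, collections: &[&'static str]) {
        if self.resources.remove(&id) {
            for &c in collections {
                *self.disk_bytes.entry(c).or_insert(0) -= bytes;
            }
        }
    }
}

fn main() {
    let mut acct = Accounting::new();
    let ancestors = ["project", "organization", "silo", "fleet"];
    let gib: i64 = 1 << 30;
    acct.insert_disk(1, gib, &ancestors);
    acct.insert_disk(1, gib, &ancestors); // replayed action: no double-count
    acct.delete_disk(1, gib, &ancestors);
    acct.delete_disk(1, gib, &ancestors); // replayed undo: no underflow
    assert!(acct.disk_bytes.values().all(|&v| v == 0));
}

The existence check plus conditional mutation is what the do_update subquery
and the unreferenced insert/delete arm express in SQL, and the loop over the
ancestor list corresponds to the UPDATE over all_collections.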
 use crate::db::alias::ExpressionAlias;
+use crate::db::model::ResourceTypeProvisioned;
+use crate::db::model::VirtualResourceProvisioned;
 use crate::db::model::VirtualResourceProvisioning;
 use crate::db::pool::DbConnection;
+use crate::db::schema::virtual_resource_provisioned;
 use crate::db::schema::virtual_resource_provisioning;
 use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery};
 use db_macros::Subquery;
 use diesel::pg::Pg;
 use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId};
 use diesel::{
-    sql_types, CombineDsl, ExpressionMethods, IntoSql, QueryDsl, RunQueryDsl,
-    SelectableHelper,
+    sql_types, CombineDsl, ExpressionMethods, IntoSql,
+    NullableExpressionMethods, QueryDsl, RunQueryDsl, SelectableHelper,
 };
 use nexus_db_model::queries::virtual_resource_provisioning_update::{
-    all_collections, parent_fleet, parent_org, parent_silo,
+    all_collections, do_update, parent_fleet, parent_org, parent_silo,
 };
 
 #[derive(Subquery, QueryId)]
@@ -127,6 +130,125 @@ impl AllCollections {
     }
 }
 
+#[derive(Subquery, QueryId)]
+#[subquery(name = do_update)]
+struct DoUpdate {
+    query: Box<dyn CteQuery<SqlType = do_update::SqlType>>,
+}
+
+impl DoUpdate {
+    fn new_for_insert(id: uuid::Uuid) -> Self {
+        use virtual_resource_provisioned::dsl;
+
+        let not_allocated = dsl::virtual_resource_provisioned
+            .find(id)
+            .count()
+            .single_value()
+            .assume_not_null()
+            .eq(0);
+
+        Self {
+            query: Box::new(diesel::select((ExpressionAlias::new::<
+                do_update::update,
+            >(not_allocated),))),
+        }
+    }
+
+    fn new_for_delete(id: uuid::Uuid) -> Self {
+        use virtual_resource_provisioned::dsl;
+
+        let already_allocated = dsl::virtual_resource_provisioned
+            .find(id)
+            .count()
+            .single_value()
+            .assume_not_null()
+            .eq(1);
+
+        Self {
+            query: Box::new(diesel::select((ExpressionAlias::new::<
+                do_update::update,
+            >(already_allocated),))),
+        }
+    }
+}
+
+#[derive(Subquery, QueryId)]
+#[subquery(name = virtual_resource_provisioning)]
+struct UpdatedProvisions {
+    query:
+        Box<dyn CteQuery<SqlType = virtual_resource_provisioning::SqlType>>,
+}
+
+impl UpdatedProvisions {
+    fn new<V>(
+        all_collections: &AllCollections,
+        do_update: &DoUpdate,
+        values: V,
+    ) -> Self
+    where
+        V: diesel::AsChangeset<Target = virtual_resource_provisioning::table>,
+        <V as diesel::AsChangeset>::Changeset:
+            QueryFragment<Pg> + Send + 'static,
+    {
+        use virtual_resource_provisioning::dsl;
+
+        Self {
+            query: Box::new(
+                diesel::update(dsl::virtual_resource_provisioning)
+                    .set(values)
+                    .filter(
+                        dsl::id.eq_any(
+                            all_collections
+                                .query_source()
+                                .select(all_collections::id),
+                        ),
+                    )
+                    .filter(
+                        do_update
+                            .query_source()
+                            .select(do_update::update)
+                            .single_value()
+                            .assume_not_null(),
+                    )
+                    .returning(virtual_resource_provisioning::all_columns),
+            ),
+        }
+    }
+}
+
+// This structure wraps a query, such that it can be used within a CTE.
+//
+// It generates a name that can be used by the "CteBuilder", but does not
+// implement "AsQuerySource". This basically means:
+// - It can be used to add data-modifying statements to the CTE
+// - The result of the query cannot be referenced by subsequent queries
+//
+// NOTE: The name for each CTE arm should be unique, so this shouldn't be used
+// multiple times within a single CTE. This restriction could be removed by
+// generating unique identifiers.
+struct UnreferenceableSubquery<Q>(Q);
+
+impl<Q> QueryFragment<Pg> for UnreferenceableSubquery<Q>
+where
+    Q: QueryFragment<Pg> + Send + 'static,
+{
+    fn walk_ast<'a>(
+        &'a self,
+        mut out: diesel::query_builder::AstPass<'_, 'a, Pg>,
+    ) -> diesel::QueryResult<()> {
+        out.push_identifier("unused_cte_arm")?;
+        Ok(())
+    }
+}
+
+impl<Q> crate::db::subquery::Subquery for UnreferenceableSubquery<Q>
+where
+    Q: QueryFragment<Pg> + Send + 'static,
+{
+    fn query(&self) -> &dyn QueryFragment<Pg> {
+        &self.0
+    }
+}
+
 /// Constructs a CTE for updating resource provisioning information in all
 /// collections for a particular object.
 #[derive(QueryId)]
@@ -138,13 +260,28 @@ impl VirtualResourceProvisioningUpdate {
     // Generic utility for updating all collections including this resource,
     // even transitively.
     //
-    // Includes:
+    // Propagated updates include:
     // - Project
     // - Organization
     // - Silo
     // - Fleet
-    fn apply_update<V>(project_id: uuid::Uuid, values: V) -> Self
+    //
+    // Arguments:
+    // - do_update: A boolean SQL query to answer the question: "Should this update
+    //   be applied"? This query is necessary for idempotency.
+    // - update: A SQL query to actually modify the resource record. Generally
+    //   this is an "INSERT", "UPDATE", or "DELETE".
+    // - project_id: The project to which the resource belongs.
+    // - values: The updated values to propagate through collections (iff
+    //   "do_update" evaluates to "true").
+    fn apply_update<U, V>(
+        do_update: DoUpdate,
+        update: U,
+        project_id: uuid::Uuid,
+        values: V,
+    ) -> Self
     where
+        U: QueryFragment<Pg> + crate::db::subquery::Subquery + Send + 'static,
         V: diesel::AsChangeset<Target = virtual_resource_provisioning::table>,
         <V as diesel::AsChangeset>::Changeset:
             QueryFragment<Pg> + Send + 'static,
     {
@@ -158,16 +295,15 @@ impl VirtualResourceProvisioningUpdate {
             &parent_silo,
             &parent_fleet,
         );
+        let updated_collections =
+            UpdatedProvisions::new(&all_collections, &do_update, values);
 
-        use virtual_resource_provisioning::dsl;
-
-        let final_update = Box::new(
-            diesel::update(dsl::virtual_resource_provisioning)
-                .set(values)
-                .filter(dsl::id.eq_any(
-                    all_collections.query_source().select(all_collections::id),
-                ))
-                .returning(VirtualResourceProvisioning::as_returning()),
+        // TODO: Do we want to select from "all_collections" instead? Seems more
+        // idempotent; it'll work even when we don't update anything...
+        let final_select = Box::new(
+            updated_collections
+                .query_source()
+                .select(VirtualResourceProvisioning::as_select()),
         );
 
         let cte = CteBuilder::new()
             .add_subquery(parent_org)
@@ -175,34 +311,135 @@ impl VirtualResourceProvisioningUpdate {
             .add_subquery(parent_silo)
             .add_subquery(parent_fleet)
             .add_subquery(all_collections)
-            .build(final_update);
+            .add_subquery(do_update)
+            .add_subquery(update)
+            .add_subquery(updated_collections)
+            .build(final_select);
 
         Self { cte }
     }
 
-    pub fn new_update_disk(
+    pub fn new_insert_disk(
+        id: uuid::Uuid,
+        disk_byte_diff: i64,
         project_id: uuid::Uuid,
-        disk_bytes_diff: i64,
     ) -> Self {
-        use virtual_resource_provisioning::dsl;
+        use virtual_resource_provisioned::dsl as resource_dsl;
+        use virtual_resource_provisioning::dsl as collection_dsl;
+
+        let mut provision =
+            VirtualResourceProvisioned::new(id, ResourceTypeProvisioned::Disk);
+        provision.virtual_disk_bytes_provisioned = disk_byte_diff;
+        Self::apply_update(
+            // We should insert the record if it does not already exist.
+            DoUpdate::new_for_insert(id),
+            // The query to actually insert the record.
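+            // (This arm runs unconditionally as a data-modifying CTE arm,
+            // but ON CONFLICT DO NOTHING makes a replay harmless; the
+            // do_update guard is what gates the propagated totals.)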
+ UnreferenceableSubquery( + diesel::insert_into(resource_dsl::virtual_resource_provisioned) + .values(provision) + .on_conflict_do_nothing() + .returning(virtual_resource_provisioned::all_columns), + ), + // Within this project, org, silo, fleet... project_id, - dsl::virtual_disk_bytes_provisioned - .eq(dsl::virtual_disk_bytes_provisioned + disk_bytes_diff), + // ... We add the disk usage. + collection_dsl::virtual_disk_bytes_provisioned + .eq(collection_dsl::virtual_disk_bytes_provisioned + + disk_byte_diff), ) } - pub fn new_update_cpus_and_ram( + pub fn new_delete_disk( + id: uuid::Uuid, + disk_byte_diff: i64, project_id: uuid::Uuid, + ) -> Self { + use virtual_resource_provisioned::dsl as resource_dsl; + use virtual_resource_provisioning::dsl as collection_dsl; + + Self::apply_update( + // We should delete the record if it exists. + DoUpdate::new_for_delete(id), + // The query to actually delete the record. + UnreferenceableSubquery( + diesel::delete(resource_dsl::virtual_resource_provisioned) + .filter(resource_dsl::id.eq(id)) + .returning(virtual_resource_provisioned::all_columns), + ), + // Within this project, org, silo, fleet... + project_id, + // ... We subtract the disk usage. + collection_dsl::virtual_disk_bytes_provisioned + .eq(collection_dsl::virtual_disk_bytes_provisioned + + disk_byte_diff), + ) + } + + pub fn new_insert_instance( + id: uuid::Uuid, cpus_diff: i64, ram_diff: i64, + project_id: uuid::Uuid, ) -> Self { - use virtual_resource_provisioning::dsl; + use virtual_resource_provisioned::dsl as resource_dsl; + use virtual_resource_provisioning::dsl as collection_dsl; + + let mut provision = VirtualResourceProvisioned::new( + id, + ResourceTypeProvisioned::Instance, + ); + provision.cpus_provisioned = cpus_diff; + provision.ram_provisioned = ram_diff; + Self::apply_update( + // We should insert the record if it does not already exist. + DoUpdate::new_for_insert(id), + // The query to actually insert the record. + UnreferenceableSubquery( + diesel::insert_into(resource_dsl::virtual_resource_provisioned) + .values(provision) + .on_conflict_do_nothing() + .returning(virtual_resource_provisioned::all_columns), + ), + // Within this project, org, silo, fleet... + project_id, + // ... We update the resource usage. + ( + collection_dsl::cpus_provisioned + .eq(collection_dsl::cpus_provisioned + cpus_diff), + collection_dsl::ram_provisioned + .eq(collection_dsl::ram_provisioned + ram_diff), + ), + ) + } + + pub fn new_delete_instance( + id: uuid::Uuid, + cpus_diff: i64, + ram_diff: i64, + project_id: uuid::Uuid, + ) -> Self { + use virtual_resource_provisioned::dsl as resource_dsl; + use virtual_resource_provisioning::dsl as collection_dsl; + + Self::apply_update( + // We should delete the record if it exists. + DoUpdate::new_for_delete(id), + // The query to actually delete the record. + UnreferenceableSubquery( + diesel::delete(resource_dsl::virtual_resource_provisioned) + .filter(resource_dsl::id.eq(id)) + .returning(virtual_resource_provisioned::all_columns), + ), + // Within this project, org, silo, fleet... project_id, + // ... We update the resource usage. 
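+            // (Note: on this delete path the caller passes cpus_diff and
+            // ram_diff already negated, e.g. from sic_account_resources_undo,
+            // so the additions below subtract.)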
( - dsl::cpus_provisioned.eq(dsl::cpus_provisioned + cpus_diff), - dsl::ram_provisioned.eq(dsl::ram_provisioned + ram_diff), + collection_dsl::cpus_provisioned + .eq(collection_dsl::cpus_provisioned + cpus_diff), + collection_dsl::ram_provisioned + .eq(collection_dsl::ram_provisioned + ram_diff), ), ) } diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 97b30ea6a71..8df348ca6dc 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -286,7 +286,7 @@ impl Populator for PopulateFleet { opctx, db::model::VirtualResourceProvisioning::new( id, - db::model::CollectionType::Fleet, + db::model::CollectionTypeProvisioned::Fleet, ), ) .await?; From db41a2a31a1595f2561d4fd60e4b712567b548de Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 13 Dec 2022 13:55:24 -0500 Subject: [PATCH 40/80] Rename virtual_resource_provisioning to collection vs resource --- common/src/api/external/mod.rs | 2 +- common/src/sql/dbinit.sql | 4 +- nexus/db-model/src/lib.rs | 8 +- nexus/db-model/src/queries/mod.rs | 2 +- ...virtual_provisioning_collection_update.rs} | 6 +- nexus/db-model/src/schema.rs | 4 +- ....rs => virtual_provisioning_collection.rs} | 10 +- ...ed.rs => virtual_provisioning_resource.rs} | 10 +- nexus/src/app/disk.rs | 2 +- nexus/src/app/instance.rs | 2 +- nexus/src/app/sagas/disk_create.rs | 4 +- nexus/src/app/sagas/disk_delete.rs | 4 +- nexus/src/app/sagas/instance_create.rs | 4 +- nexus/src/app/sagas/snapshot_create.rs | 4 +- nexus/src/db/datastore/mod.rs | 12 +- nexus/src/db/datastore/organization.rs | 6 +- nexus/src/db/datastore/project.rs | 6 +- nexus/src/db/datastore/silo.rs | 16 +-- ....rs => virtual_provisioning_collection.rs} | 119 +++++++++--------- nexus/src/db/queries/mod.rs | 2 +- ...virtual_provisioning_collection_update.rs} | 95 +++++++------- nexus/src/populate.rs | 4 +- nexus/tests/integration_tests/disks.rs | 119 ++++++++++-------- nexus/tests/integration_tests/instances.rs | 32 ++--- nexus/tests/integration_tests/snapshots.rs | 12 +- 25 files changed, 258 insertions(+), 231 deletions(-) rename nexus/db-model/src/queries/{virtual_resource_provisioning_update.rs => virtual_provisioning_collection_update.rs} (84%) rename nexus/db-model/src/{virtual_resource_provisioning.rs => virtual_provisioning_collection.rs} (84%) rename nexus/db-model/src/{virtual_resource_provisioned.rs => virtual_provisioning_resource.rs} (82%) rename nexus/src/db/datastore/{virtual_resource_provisioning.rs => virtual_provisioning_collection.rs} (67%) rename nexus/src/db/queries/{virtual_resource_provisioning_update.rs => virtual_provisioning_collection_update.rs} (81%) diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 19f9e5f5b22..1c0cd19e3f3 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -604,7 +604,7 @@ pub enum ResourceType { RoleBuiltin, UpdateAvailableArtifact, UserBuiltin, - VirtualResourceProvisioning, + VirtualProvision, Zpool, } diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 7b51ab5c524..9c3226f1237 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -145,7 +145,7 @@ CREATE INDEX ON omicron.public.service ( -- - Organizations -- - Silos -- - Fleet -CREATE TABLE omicron.public.virtual_resource_provisioning ( +CREATE TABLE omicron.public.virtual_provisioning_collection ( -- Should match the UUID of the corresponding collection. id UUID PRIMARY KEY, -- Identifies the type of the collection. 
@@ -167,7 +167,7 @@ CREATE TABLE omicron.public.virtual_resource_provisioning ( -- - Disks -- - Instances -- - Snapshots -CREATE TABLE omicron.public.virtual_resource_provisioned ( +CREATE TABLE omicron.public.virtual_provisioning_resource ( -- Should match the UUID of the corresponding collection. id UUID PRIMARY KEY, -- Identifies the type of the resource. diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 02398e3c8a0..e5770aef67b 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -63,8 +63,8 @@ mod ssh_key; mod u16; mod update_artifact; mod user_builtin; -mod virtual_resource_provisioned; -mod virtual_resource_provisioning; +mod virtual_provisioning_collection; +mod virtual_provisioning_resource; mod vni; mod volume; mod vpc; @@ -130,8 +130,8 @@ pub use snapshot::*; pub use ssh_key::*; pub use update_artifact::*; pub use user_builtin::*; -pub use virtual_resource_provisioned::*; -pub use virtual_resource_provisioning::*; +pub use virtual_provisioning_collection::*; +pub use virtual_provisioning_resource::*; pub use vni::*; pub use volume::*; pub use vpc::*; diff --git a/nexus/db-model/src/queries/mod.rs b/nexus/db-model/src/queries/mod.rs index b9fa7c46086..7724d48bab9 100644 --- a/nexus/db-model/src/queries/mod.rs +++ b/nexus/db-model/src/queries/mod.rs @@ -5,4 +5,4 @@ //! Subqueries used in CTEs. pub mod region_allocation; -pub mod virtual_resource_provisioning_update; +pub mod virtual_provisioning_collection_update; diff --git a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs b/nexus/db-model/src/queries/virtual_provisioning_collection_update.rs similarity index 84% rename from nexus/db-model/src/queries/virtual_resource_provisioning_update.rs rename to nexus/db-model/src/queries/virtual_provisioning_collection_update.rs index 59c99df3ca7..b2893347374 100644 --- a/nexus/db-model/src/queries/virtual_resource_provisioning_update.rs +++ b/nexus/db-model/src/queries/virtual_provisioning_collection_update.rs @@ -4,12 +4,12 @@ //! Describes the resource provisioning update CTE //! -//! Refer to +//! Refer to //! for the construction of this query. use crate::schema::organization; use crate::schema::silo; -use crate::schema::virtual_resource_provisioning; +use crate::schema::virtual_provisioning_collection; table! { parent_org { @@ -45,7 +45,7 @@ diesel::allow_tables_to_appear_in_same_query!(organization, parent_org,); diesel::allow_tables_to_appear_in_same_query!(silo, parent_silo,); diesel::allow_tables_to_appear_in_same_query!( - virtual_resource_provisioning, + virtual_provisioning_collection, parent_org, parent_silo, parent_fleet, diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index ea7347aaddd..899d1b10700 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -417,7 +417,7 @@ table! { } table! { - virtual_resource_provisioning { + virtual_provisioning_collection { id -> Uuid, collection_type -> Text, virtual_disk_bytes_provisioned -> Int8, @@ -427,7 +427,7 @@ table! { } table! 
{ - virtual_resource_provisioned { + virtual_provisioning_resource { id -> Uuid, resource_type -> Text, virtual_disk_bytes_provisioned -> Int8, diff --git a/nexus/db-model/src/virtual_resource_provisioning.rs b/nexus/db-model/src/virtual_provisioning_collection.rs similarity index 84% rename from nexus/db-model/src/virtual_resource_provisioning.rs rename to nexus/db-model/src/virtual_provisioning_collection.rs index e5f245b8f53..a18b8d8153a 100644 --- a/nexus/db-model/src/virtual_resource_provisioning.rs +++ b/nexus/db-model/src/virtual_provisioning_collection.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use crate::schema::virtual_resource_provisioning; +use crate::schema::virtual_provisioning_collection; use uuid::Uuid; #[derive(Debug)] @@ -26,10 +26,10 @@ impl std::fmt::Display for CollectionTypeProvisioned { } } -/// Describes virtual_resource_provisioning for a collection +/// Describes virtual_provisioning_collection for a collection #[derive(Clone, Selectable, Queryable, Insertable, Debug)] -#[diesel(table_name = virtual_resource_provisioning)] -pub struct VirtualResourceProvisioning { +#[diesel(table_name = virtual_provisioning_collection)] +pub struct VirtualProvisioningCollection { pub id: Uuid, pub collection_type: String, @@ -38,7 +38,7 @@ pub struct VirtualResourceProvisioning { pub ram_provisioned: i64, } -impl VirtualResourceProvisioning { +impl VirtualProvisioningCollection { pub fn new(id: Uuid, collection_type: CollectionTypeProvisioned) -> Self { Self { id, diff --git a/nexus/db-model/src/virtual_resource_provisioned.rs b/nexus/db-model/src/virtual_provisioning_resource.rs similarity index 82% rename from nexus/db-model/src/virtual_resource_provisioned.rs rename to nexus/db-model/src/virtual_provisioning_resource.rs index 81d61781705..1ed9c4227d2 100644 --- a/nexus/db-model/src/virtual_resource_provisioned.rs +++ b/nexus/db-model/src/virtual_provisioning_resource.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use crate::schema::virtual_resource_provisioned; +use crate::schema::virtual_provisioning_resource; use uuid::Uuid; #[derive(Debug)] @@ -20,10 +20,10 @@ impl std::fmt::Display for ResourceTypeProvisioned { } } -/// Describes virtual_resource_provisioned for a resource. +/// Describes virtual_provisioning_resource for a resource. 
#[derive(Clone, Selectable, Queryable, Insertable, Debug)] -#[diesel(table_name = virtual_resource_provisioned)] -pub struct VirtualResourceProvisioned { +#[diesel(table_name = virtual_provisioning_resource)] +pub struct VirtualProvisioningResource { pub id: Uuid, pub resource_type: String, @@ -32,7 +32,7 @@ pub struct VirtualResourceProvisioned { pub ram_provisioned: i64, } -impl VirtualResourceProvisioned { +impl VirtualProvisioningResource { pub fn new(id: Uuid, resource_type: ResourceTypeProvisioned) -> Self { Self { id, diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 035d8234e6f..556cd9fcc06 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -514,7 +514,7 @@ impl super::Nexus { // TODO: This should exist within a saga self.db_datastore - .virtual_resource_provisioning_delete_disk( + .virtual_provisioning_collection_delete_disk( &opctx, db_snapshot.id(), project.id(), diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index b75cfcc1235..1a537a6fb90 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -299,7 +299,7 @@ impl super::Nexus { .project_delete_instance(opctx, &authz_instance) .await?; self.db_datastore - .virtual_resource_provisioning_delete_instance( + .virtual_provisioning_collection_delete_instance( &opctx, instance.id(), instance.project_id, diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 6551e9961ee..247f412bcd5 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -291,7 +291,7 @@ async fn sdc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_insert_disk( + .virtual_provisioning_collection_insert_disk( &opctx, disk_created.id(), params.project_id, @@ -319,7 +319,7 @@ async fn sdc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_delete_disk( + .virtual_provisioning_collection_delete_disk( &opctx, disk_created.id(), params.project_id, diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index a79b8c54cf8..2ff6f2e214f 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -114,7 +114,7 @@ async fn sdd_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_delete_disk( + .virtual_provisioning_collection_delete_disk( &opctx, deleted_disk.id(), params.project_id, @@ -142,7 +142,7 @@ async fn sdd_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_insert_disk( + .virtual_provisioning_collection_insert_disk( &opctx, deleted_disk.id(), params.project_id, diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 3566b8ef13c..521f60ba77c 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -828,7 +828,7 @@ async fn sic_account_resources( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_insert_instance( + .virtual_provisioning_collection_insert_instance( &opctx, instance_id, params.project_id, @@ -857,7 +857,7 @@ async fn sic_account_resources_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx 
.datastore() - .virtual_resource_provisioning_delete_instance( + .virtual_provisioning_collection_delete_instance( &opctx, instance_id, params.project_id, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 1328d720e20..7d0b49ea080 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -551,7 +551,7 @@ async fn ssc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_insert_disk( + .virtual_provisioning_collection_insert_disk( &opctx, snapshot_created.id(), params.project_id, @@ -580,7 +580,7 @@ async fn ssc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_resource_provisioning_delete_disk( + .virtual_provisioning_collection_delete_disk( &opctx, snapshot_created.id(), params.project_id, diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index a30c1654c2b..803edb5851f 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -68,7 +68,7 @@ mod sled; mod snapshot; mod ssh_key; mod update; -mod virtual_resource_provisioning; +mod virtual_provisioning_collection; mod volume; mod vpc; mod zpool; @@ -106,8 +106,8 @@ impl RunnableQuery for T where pub struct DataStore { pool: Arc, - virtual_resource_provisioning_producer: - virtual_resource_provisioning::Producer, + virtual_provisioning_collection_producer: + virtual_provisioning_collection::Producer, } // The majority of `DataStore`'s methods live in our submodules as a concession @@ -117,15 +117,15 @@ impl DataStore { pub fn new(pool: Arc) -> Self { DataStore { pool, - virtual_resource_provisioning_producer: - virtual_resource_provisioning::Producer::new(), + virtual_provisioning_collection_producer: + virtual_provisioning_collection::Producer::new(), } } pub fn register_producers(&self, registry: &ProducerRegistry) { registry .register_producer( - self.virtual_resource_provisioning_producer.clone(), + self.virtual_provisioning_collection_producer.clone(), ) .unwrap(); } diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index 298b99f5e26..a5ac7b49886 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -20,7 +20,7 @@ use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::OrganizationUpdate; use crate::db::model::Silo; -use crate::db::model::VirtualResourceProvisioning; +use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; use crate::external_api::params; use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; @@ -84,9 +84,9 @@ impl DataStore { } })?; - self.virtual_resource_provisioning_create_on_connection( + self.virtual_provisioning_collection_create_on_connection( &conn, - VirtualResourceProvisioning::new( + VirtualProvisioningCollection::new( org.id(), CollectionTypeProvisioned::Organization, ), diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index 36adf9d394c..d70de2699aa 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -19,7 +19,7 @@ use crate::db::model::Name; use crate::db::model::Organization; use crate::db::model::Project; use crate::db::model::ProjectUpdate; -use crate::db::model::VirtualResourceProvisioning; +use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; 
use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; @@ -77,9 +77,9 @@ impl DataStore { })?; // Create resource provisioning for the project. - self.virtual_resource_provisioning_create_on_connection( + self.virtual_provisioning_collection_create_on_connection( &conn, - VirtualResourceProvisioning::new( + VirtualProvisioningCollection::new( project.id(), CollectionTypeProvisioned::Project, ), diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 18cf45b5c13..e3d3a3eb559 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -18,7 +18,7 @@ use crate::db::identity::Resource; use crate::db::model::CollectionTypeProvisioned; use crate::db::model::Name; use crate::db::model::Silo; -use crate::db::model::VirtualResourceProvisioning; +use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; use crate::external_api::params; use crate::external_api::shared; @@ -56,9 +56,9 @@ impl DataStore { })?; info!(opctx.log, "created {} built-in silos", count); - self.virtual_resource_provisioning_create( + self.virtual_provisioning_collection_create( opctx, - VirtualResourceProvisioning::new( + VirtualProvisioningCollection::new( DEFAULT_SILO.id(), CollectionTypeProvisioned::Silo, ), @@ -150,18 +150,18 @@ impl DataStore { .await? .transaction_async(|conn| async move { let silo = silo_create_query.get_result_async(&conn).await?; - use db::schema::virtual_resource_provisioning::dsl; - diesel::insert_into(dsl::virtual_resource_provisioning) - .values(VirtualResourceProvisioning::new( + use db::schema::virtual_provisioning_collection::dsl; + diesel::insert_into(dsl::virtual_provisioning_collection) + .values(VirtualProvisioningCollection::new( silo.id(), CollectionTypeProvisioned::Silo, )) .execute_async(&conn) .await?; - self.virtual_resource_provisioning_create_on_connection( + self.virtual_provisioning_collection_create_on_connection( &conn, - VirtualResourceProvisioning::new( + VirtualProvisioningCollection::new( DEFAULT_SILO.id(), CollectionTypeProvisioned::Silo, ), diff --git a/nexus/src/db/datastore/virtual_resource_provisioning.rs b/nexus/src/db/datastore/virtual_provisioning_collection.rs similarity index 67% rename from nexus/src/db/datastore/virtual_resource_provisioning.rs rename to nexus/src/db/datastore/virtual_provisioning_collection.rs index cf4e5c5b055..a7709d83c17 100644 --- a/nexus/src/db/datastore/virtual_resource_provisioning.rs +++ b/nexus/src/db/datastore/virtual_provisioning_collection.rs @@ -2,16 +2,16 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! [`DataStore`] methods on [`VirtualResourceProvisioning`]s. +//! [`DataStore`] methods on [`VirtualProvisioningCollection`]s. 
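A quick aside on the metrics plumbing in this file: every datastore write
that touches provisioning totals also appends samples to an oximeter
producer, which the collector later drains. The sketch below (plain Rust
with hypothetical names and a made-up metric string, not the real oximeter
API) shows the buffered-producer pattern in isolation:

use std::sync::{Arc, Mutex};

#[derive(Clone)]
struct BufferedProducer {
    // (metric name, value) pairs awaiting collection.
    samples: Arc<Mutex<Vec<(String, i64)>>>,
}

impl BufferedProducer {
    fn new() -> Self {
        Self { samples: Arc::new(Mutex::new(Vec::new())) }
    }

    // Called on every provisioning change, in the spirit of
    // append_disk_metrics / append_cpu_metrics below.
    fn append(&self, metric: &str, value: i64) {
        self.samples.lock().unwrap().push((metric.to_string(), value));
    }

    // Called from the collection endpoint; drains whatever accumulated.
    fn produce(&self) -> Vec<(String, i64)> {
        std::mem::take(&mut *self.samples.lock().unwrap())
    }
}

fn main() {
    let producer = BufferedProducer::new();
    producer.append("virtual_disk_space_provisioned", 1 << 30);
    assert_eq!(producer.produce().len(), 1);
    assert!(producer.produce().is_empty()); // nothing new since last collection
}

Because samples are appended at write time, the exported series tracks the
database state without a separate polling query; the trade-off is that a
crash between the write and the next collection drops those samples.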
use super::DataStore; use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; -use crate::db::model::VirtualResourceProvisioning; +use crate::db::model::VirtualProvisioningCollection; use crate::db::pool::DbConnection; -use crate::db::queries::virtual_resource_provisioning_update::VirtualResourceProvisioningUpdate; +use crate::db::queries::virtual_provisioning_collection_update::VirtualProvisioningCollectionUpdate; use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use diesel::prelude::*; use omicron_common::api::external::{ @@ -47,7 +47,7 @@ struct RamProvisioned { bytes: i64, } -/// An oximeter producer for reporting [`VirtualResourceProvisioning`] information to Clickhouse. +/// An oximeter producer for reporting [`VirtualProvisioningCollection`] information to Clickhouse. /// /// This producer collects samples whenever the database record for a collection /// is created or updated. This implies that the CockroachDB record is always @@ -65,7 +65,7 @@ impl Producer { fn append_disk_metrics( &self, - provisions: &Vec, + provisions: &Vec, ) { let new_samples = provisions .iter() @@ -84,7 +84,7 @@ impl Producer { fn append_cpu_metrics( &self, - provisions: &Vec, + provisions: &Vec, ) { let new_samples = provisions .iter() @@ -122,37 +122,37 @@ impl oximeter::Producer for Producer { } impl DataStore { - /// Create a [`VirtualResourceProvisioning`] object. - pub async fn virtual_resource_provisioning_create( + /// Create a [`VirtualProvisioningCollection`] object. + pub async fn virtual_provisioning_collection_create( &self, opctx: &OpContext, - virtual_resource_provisioning: VirtualResourceProvisioning, - ) -> Result, Error> { + virtual_provisioning_collection: VirtualProvisioningCollection, + ) -> Result, Error> { let pool = self.pool_authorized(opctx).await?; - self.virtual_resource_provisioning_create_on_connection( + self.virtual_provisioning_collection_create_on_connection( pool, - virtual_resource_provisioning, + virtual_provisioning_collection, ) .await } - pub(crate) async fn virtual_resource_provisioning_create_on_connection< + pub(crate) async fn virtual_provisioning_collection_create_on_connection< ConnErr, >( &self, conn: &(impl async_bb8_diesel::AsyncConnection + Sync), - virtual_resource_provisioning: VirtualResourceProvisioning, - ) -> Result, Error> + virtual_provisioning_collection: VirtualProvisioningCollection, + ) -> Result, Error> where ConnErr: From + Send + 'static, PoolError: From, { - use db::schema::virtual_resource_provisioning::dsl; + use db::schema::virtual_provisioning_collection::dsl; - let provisions: Vec = - diesel::insert_into(dsl::virtual_resource_provisioning) - .values(virtual_resource_provisioning) + let provisions: Vec = + diesel::insert_into(dsl::virtual_provisioning_collection) + .values(virtual_provisioning_collection) .on_conflict_do_nothing() .get_results_async(conn) .await @@ -162,46 +162,47 @@ impl DataStore { ErrorHandler::Server, ) })?; - self.virtual_resource_provisioning_producer + self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions); - self.virtual_resource_provisioning_producer + self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions); Ok(provisions) } - pub async fn virtual_resource_provisioning_get( + pub async fn virtual_provisioning_collection_get( &self, opctx: &OpContext, id: Uuid, - ) -> Result { - use db::schema::virtual_resource_provisioning::dsl; + ) -> Result { + use 
db::schema::virtual_provisioning_collection::dsl; - let virtual_resource_provisioning = dsl::virtual_resource_provisioning - .find(id) - .select(VirtualResourceProvisioning::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::VirtualResourceProvisioning, - LookupType::ById(id), - ), - ) - })?; - Ok(virtual_resource_provisioning) + let virtual_provisioning_collection = + dsl::virtual_provisioning_collection + .find(id) + .select(VirtualProvisioningCollection::as_select()) + .get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::VirtualProvision, + LookupType::ById(id), + ), + ) + })?; + Ok(virtual_provisioning_collection) } - /// Delete a [`VirtualResourceProvisioning`] object. - pub async fn virtual_resource_provisioning_delete( + /// Delete a [`VirtualProvisioningCollection`] object. + pub async fn virtual_provisioning_collection_delete( &self, opctx: &OpContext, id: Uuid, ) -> DeleteResult { - use db::schema::virtual_resource_provisioning::dsl; + use db::schema::virtual_provisioning_collection::dsl; - diesel::delete(dsl::virtual_resource_provisioning) + diesel::delete(dsl::virtual_provisioning_collection) .filter(dsl::id.eq(id)) .execute_async(self.pool_authorized(opctx).await?) .await @@ -220,14 +221,14 @@ impl DataStore { // calls? Maybe it could be an optional helper? /// Transitively updates all provisioned disk provisions from project -> fleet. - pub async fn virtual_resource_provisioning_insert_disk( + pub async fn virtual_provisioning_collection_insert_disk( &self, opctx: &OpContext, id: Uuid, project_id: Uuid, disk_byte_diff: i64, - ) -> Result, Error> { - let provisions = VirtualResourceProvisioningUpdate::new_insert_disk( + ) -> Result, Error> { + let provisions = VirtualProvisioningCollectionUpdate::new_insert_disk( id, disk_byte_diff, project_id, @@ -235,20 +236,20 @@ impl DataStore { .get_results_async(self.pool_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; - self.virtual_resource_provisioning_producer + self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions); Ok(provisions) } /// Transitively updates all provisioned disk provisions from project -> fleet. - pub async fn virtual_resource_provisioning_delete_disk( + pub async fn virtual_provisioning_collection_delete_disk( &self, opctx: &OpContext, id: Uuid, project_id: Uuid, disk_byte_diff: i64, - ) -> Result, Error> { - let provisions = VirtualResourceProvisioningUpdate::new_delete_disk( + ) -> Result, Error> { + let provisions = VirtualProvisioningCollectionUpdate::new_delete_disk( id, disk_byte_diff, project_id, @@ -256,22 +257,22 @@ impl DataStore { .get_results_async(self.pool_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; - self.virtual_resource_provisioning_producer + self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions); Ok(provisions) } /// Transitively updates all CPU/RAM provisions from project -> fleet. 
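// (Editorial sketch.) The insert_*/delete_* pairs above are meant to be
// called from matching saga action/undo pairs: the forward action charges
// the project's whole chain of collections, and the unwind path credits the
// same magnitude back, negated -- compare the disk-create saga and the
// snapshot-create undo elsewhere in this series:
//
//     // forward action
//     datastore.virtual_provisioning_collection_insert_disk(
//         &opctx, disk_id, project_id, disk_byte_diff,
//     ).await?;
//
//     // undo / delete path: same magnitude, negated
//     datastore.virtual_provisioning_collection_delete_disk(
//         &opctx, disk_id, project_id, -disk_byte_diff,
//     ).await?;
//
// (`disk_byte_diff` is the disk's size in bytes; the names are illustrative.)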
- pub async fn virtual_resource_provisioning_insert_instance( + pub async fn virtual_provisioning_collection_insert_instance( &self, opctx: &OpContext, id: Uuid, project_id: Uuid, cpus_diff: i64, ram_diff: i64, - ) -> Result, Error> { + ) -> Result, Error> { let provisions = - VirtualResourceProvisioningUpdate::new_insert_instance( + VirtualProvisioningCollectionUpdate::new_insert_instance( id, cpus_diff, ram_diff, project_id, ) .get_results_async(self.pool_authorized(opctx).await?) @@ -279,22 +280,22 @@ impl DataStore { .map_err(|e| { public_error_from_diesel_pool(e, ErrorHandler::Server) })?; - self.virtual_resource_provisioning_producer + self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions); Ok(provisions) } /// Transitively updates all CPU/RAM provisions from project -> fleet. - pub async fn virtual_resource_provisioning_delete_instance( + pub async fn virtual_provisioning_collection_delete_instance( &self, opctx: &OpContext, id: Uuid, project_id: Uuid, cpus_diff: i64, ram_diff: i64, - ) -> Result, Error> { + ) -> Result, Error> { let provisions = - VirtualResourceProvisioningUpdate::new_delete_instance( + VirtualProvisioningCollectionUpdate::new_delete_instance( id, cpus_diff, ram_diff, project_id, ) .get_results_async(self.pool_authorized(opctx).await?) @@ -302,7 +303,7 @@ impl DataStore { .map_err(|e| { public_error_from_diesel_pool(e, ErrorHandler::Server) })?; - self.virtual_resource_provisioning_producer + self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions); Ok(provisions) } diff --git a/nexus/src/db/queries/mod.rs b/nexus/src/db/queries/mod.rs index dcda0e3d4d9..d413dce6aad 100644 --- a/nexus/src/db/queries/mod.rs +++ b/nexus/src/db/queries/mod.rs @@ -11,6 +11,6 @@ pub mod ip_pool; mod next_item; pub mod network_interface; pub mod region_allocation; -pub mod virtual_resource_provisioning_update; +pub mod virtual_provisioning_collection_update; pub mod vpc; pub mod vpc_subnet; diff --git a/nexus/src/db/queries/virtual_resource_provisioning_update.rs b/nexus/src/db/queries/virtual_provisioning_collection_update.rs similarity index 81% rename from nexus/src/db/queries/virtual_resource_provisioning_update.rs rename to nexus/src/db/queries/virtual_provisioning_collection_update.rs index 41a0cd9a146..8b83ce34a1e 100644 --- a/nexus/src/db/queries/virtual_resource_provisioning_update.rs +++ b/nexus/src/db/queries/virtual_provisioning_collection_update.rs @@ -6,11 +6,11 @@ use crate::db::alias::ExpressionAlias; use crate::db::model::ResourceTypeProvisioned; -use crate::db::model::VirtualResourceProvisioned; -use crate::db::model::VirtualResourceProvisioning; +use crate::db::model::VirtualProvisioningCollection; +use crate::db::model::VirtualProvisioningResource; use crate::db::pool::DbConnection; -use crate::db::schema::virtual_resource_provisioned; -use crate::db::schema::virtual_resource_provisioning; +use crate::db::schema::virtual_provisioning_collection; +use crate::db::schema::virtual_provisioning_resource; use crate::db::subquery::{AsQuerySource, Cte, CteBuilder, CteQuery}; use db_macros::Subquery; use diesel::pg::Pg; @@ -19,7 +19,7 @@ use diesel::{ sql_types, CombineDsl, ExpressionMethods, IntoSql, NullableExpressionMethods, QueryDsl, RunQueryDsl, SelectableHelper, }; -use nexus_db_model::queries::virtual_resource_provisioning_update::{ +use nexus_db_model::queries::virtual_provisioning_collection_update::{ all_collections, do_update, parent_fleet, parent_org, parent_silo, }; @@ -138,9 +138,9 @@ struct DoUpdate { impl DoUpdate 
{ fn new_for_insert(id: uuid::Uuid) -> Self { - use virtual_resource_provisioned::dsl; + use virtual_provisioning_resource::dsl; - let not_allocted = dsl::virtual_resource_provisioned + let not_allocted = dsl::virtual_provisioning_resource .find(id) .count() .single_value() @@ -155,9 +155,9 @@ impl DoUpdate { } fn new_for_delete(id: uuid::Uuid) -> Self { - use virtual_resource_provisioned::dsl; + use virtual_provisioning_resource::dsl; - let already_allocated = dsl::virtual_resource_provisioned + let already_allocated = dsl::virtual_provisioning_resource .find(id) .count() .single_value() @@ -173,9 +173,10 @@ impl DoUpdate { } #[derive(Subquery, QueryId)] -#[subquery(name = virtual_resource_provisioning)] +#[subquery(name = virtual_provisioning_collection)] struct UpdatedProvisions { - query: Box>, + query: + Box>, } impl UpdatedProvisions { @@ -185,15 +186,15 @@ impl UpdatedProvisions { values: V, ) -> Self where - V: diesel::AsChangeset, + V: diesel::AsChangeset, ::Changeset: QueryFragment + Send + 'static, { - use virtual_resource_provisioning::dsl; + use virtual_provisioning_collection::dsl; Self { query: Box::new( - diesel::update(dsl::virtual_resource_provisioning) + diesel::update(dsl::virtual_provisioning_collection) .set(values) .filter( dsl::id.eq_any( @@ -209,7 +210,7 @@ impl UpdatedProvisions { .single_value() .assume_not_null(), ) - .returning(virtual_resource_provisioning::all_columns), + .returning(virtual_provisioning_collection::all_columns), ), } } @@ -252,11 +253,11 @@ where /// Constructs a CTE for updating resource provisioning information in all /// collections for a particular object. #[derive(QueryId)] -pub struct VirtualResourceProvisioningUpdate { +pub struct VirtualProvisioningCollectionUpdate { cte: Cte, } -impl VirtualResourceProvisioningUpdate { +impl VirtualProvisioningCollectionUpdate { // Generic utility for updating all collections including this resource, // even transitively. // @@ -282,7 +283,7 @@ impl VirtualResourceProvisioningUpdate { ) -> Self where U: QueryFragment + crate::db::subquery::Subquery + Send + 'static, - V: diesel::AsChangeset, + V: diesel::AsChangeset, ::Changeset: QueryFragment + Send + 'static, { @@ -303,7 +304,7 @@ impl VirtualResourceProvisioningUpdate { let final_select = Box::new( updated_collections .query_source() - .select(VirtualResourceProvisioning::as_select()), + .select(VirtualProvisioningCollection::as_select()), ); let cte = CteBuilder::new() @@ -324,11 +325,11 @@ impl VirtualResourceProvisioningUpdate { disk_byte_diff: i64, project_id: uuid::Uuid, ) -> Self { - use virtual_resource_provisioned::dsl as resource_dsl; - use virtual_resource_provisioning::dsl as collection_dsl; + use virtual_provisioning_collection::dsl as collection_dsl; + use virtual_provisioning_resource::dsl as resource_dsl; let mut provision = - VirtualResourceProvisioned::new(id, ResourceTypeProvisioned::Disk); + VirtualProvisioningResource::new(id, ResourceTypeProvisioned::Disk); provision.virtual_disk_bytes_provisioned = disk_byte_diff; Self::apply_update( @@ -336,10 +337,12 @@ impl VirtualResourceProvisioningUpdate { DoUpdate::new_for_insert(id), // The query to actually insert the record. 
UnreferenceableSubquery( - diesel::insert_into(resource_dsl::virtual_resource_provisioned) - .values(provision) - .on_conflict_do_nothing() - .returning(virtual_resource_provisioned::all_columns), + diesel::insert_into( + resource_dsl::virtual_provisioning_resource, + ) + .values(provision) + .on_conflict_do_nothing() + .returning(virtual_provisioning_resource::all_columns), ), // Within this project, org, silo, fleet... project_id, @@ -355,17 +358,17 @@ impl VirtualResourceProvisioningUpdate { disk_byte_diff: i64, project_id: uuid::Uuid, ) -> Self { - use virtual_resource_provisioned::dsl as resource_dsl; - use virtual_resource_provisioning::dsl as collection_dsl; + use virtual_provisioning_collection::dsl as collection_dsl; + use virtual_provisioning_resource::dsl as resource_dsl; Self::apply_update( // We should delete the record if it exists. DoUpdate::new_for_delete(id), // The query to actually delete the record. UnreferenceableSubquery( - diesel::delete(resource_dsl::virtual_resource_provisioned) + diesel::delete(resource_dsl::virtual_provisioning_resource) .filter(resource_dsl::id.eq(id)) - .returning(virtual_resource_provisioned::all_columns), + .returning(virtual_provisioning_resource::all_columns), ), // Within this project, org, silo, fleet... project_id, @@ -382,10 +385,10 @@ impl VirtualResourceProvisioningUpdate { ram_diff: i64, project_id: uuid::Uuid, ) -> Self { - use virtual_resource_provisioned::dsl as resource_dsl; - use virtual_resource_provisioning::dsl as collection_dsl; + use virtual_provisioning_collection::dsl as collection_dsl; + use virtual_provisioning_resource::dsl as resource_dsl; - let mut provision = VirtualResourceProvisioned::new( + let mut provision = VirtualProvisioningResource::new( id, ResourceTypeProvisioned::Instance, ); @@ -397,10 +400,12 @@ impl VirtualResourceProvisioningUpdate { DoUpdate::new_for_insert(id), // The query to actually insert the record. UnreferenceableSubquery( - diesel::insert_into(resource_dsl::virtual_resource_provisioned) - .values(provision) - .on_conflict_do_nothing() - .returning(virtual_resource_provisioned::all_columns), + diesel::insert_into( + resource_dsl::virtual_provisioning_resource, + ) + .values(provision) + .on_conflict_do_nothing() + .returning(virtual_provisioning_resource::all_columns), ), // Within this project, org, silo, fleet... project_id, @@ -420,17 +425,17 @@ impl VirtualResourceProvisioningUpdate { ram_diff: i64, project_id: uuid::Uuid, ) -> Self { - use virtual_resource_provisioned::dsl as resource_dsl; - use virtual_resource_provisioning::dsl as collection_dsl; + use virtual_provisioning_collection::dsl as collection_dsl; + use virtual_provisioning_resource::dsl as resource_dsl; Self::apply_update( // We should delete the record if it exists. DoUpdate::new_for_delete(id), // The query to actually delete the record. UnreferenceableSubquery( - diesel::delete(resource_dsl::virtual_resource_provisioned) + diesel::delete(resource_dsl::virtual_provisioning_resource) .filter(resource_dsl::id.eq(id)) - .returning(virtual_resource_provisioned::all_columns), + .returning(virtual_provisioning_resource::all_columns), ), // Within this project, org, silo, fleet... 
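// (Editorial sketch of the SQL these builders assemble; details hedged, the
// real text is produced by the CTE machinery.) Each new_* constructor builds
// roughly this statement, with the INSERT or DELETE arm swapped in and the
// SET clause matching the resource kind:
//
//     WITH parent_org AS (SELECT organization_id AS id FROM project
//                         WHERE id = $project),
//          parent_silo AS (SELECT silo_id AS id FROM organization
//                          WHERE id IN (SELECT id FROM parent_org)),
//          parent_fleet AS (SELECT fleet_id AS id FROM silo
//                           WHERE id IN (SELECT id FROM parent_silo)),
//          all_collections AS (SELECT $project AS id
//                              UNION SELECT id FROM parent_org
//                              UNION SELECT id FROM parent_silo
//                              UNION SELECT id FROM parent_fleet),
//          do_update AS (SELECT <resource row absent/present, as appropriate>),
//          <INSERT or DELETE of the virtual_provisioning_resource row>,
//          virtual_provisioning_collection AS (
//              UPDATE virtual_provisioning_collection
//                 SET <e.g. virtual_disk_bytes_provisioned += $delta>
//               WHERE id IN (SELECT id FROM all_collections)
//                 AND (SELECT * FROM do_update)
//           RETURNING *)
//     SELECT * FROM virtual_provisioning_collection;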
project_id, @@ -445,7 +450,7 @@ impl VirtualResourceProvisioningUpdate { } } -impl QueryFragment for VirtualResourceProvisioningUpdate { +impl QueryFragment for VirtualProvisioningCollectionUpdate { fn walk_ast<'a>( &'a self, mut out: AstPass<'_, 'a, Pg>, @@ -461,8 +466,8 @@ type SelectableSql = < >::SelectExpression as diesel::Expression >::SqlType; -impl Query for VirtualResourceProvisioningUpdate { - type SqlType = SelectableSql; +impl Query for VirtualProvisioningCollectionUpdate { + type SqlType = SelectableSql; } -impl RunQueryDsl for VirtualResourceProvisioningUpdate {} +impl RunQueryDsl for VirtualProvisioningCollectionUpdate {} diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 8df348ca6dc..2c9bae5de4a 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -282,9 +282,9 @@ impl Populator for PopulateFleet { let id = *db::fixed_data::FLEET_ID; datastore.fleet_insert(opctx, &db::model::Fleet::new(id)).await?; datastore - .virtual_resource_provisioning_create( + .virtual_provisioning_collection_create( opctx, - db::model::VirtualResourceProvisioning::new( + db::model::VirtualProvisioningCollection::new( id, db::model::CollectionTypeProvisioned::Fleet, ), diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 10e7da0a35c..b696ed6edab 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -902,7 +902,7 @@ async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_disk_virtual_resource_provisioning( +async fn test_disk_virtual_provisioning_collection( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; @@ -922,31 +922,46 @@ async fn test_disk_virtual_resource_provisioning( OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); // The project and organization should start as empty. 
- let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id1) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id1) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id2) + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, org_id) + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, org_id) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, *SILO_ID) + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, *SILO_ID) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, *FLEET_ID) + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, *FLEET_ID) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); // Ask for a 1 gibibyte disk in the first project. 
// @@ -974,41 +989,44 @@ async fn test_disk_virtual_resource_provisioning( .execute() .await .expect("unexpected failure creating 1 GiB disk"); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id1) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id1) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id2) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, org_id) + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, org_id) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, *SILO_ID) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, *SILO_ID) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, *FLEET_ID) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, *FLEET_ID) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); @@ -1039,28 +1057,28 @@ async fn test_disk_virtual_resource_provisioning( .execute() .await .expect("unexpected failure creating 1 GiB disk"); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id1) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id1) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id2) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, org_id) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, org_id) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, 2 * disk_size.to_bytes() as i64 ); @@ -1072,25 +1090,28 @@ async fn 
test_disk_virtual_resource_provisioning( .execute() .await .expect("failed to delete disk"); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id1) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id1) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id2) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.virtual_disk_bytes_provisioned, 0); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, org_id) + assert_eq!( + virtual_provisioning_collection.virtual_disk_bytes_provisioned, + 0 + ); + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, org_id) .await .unwrap(); assert_eq!( - virtual_resource_provisioning.virtual_disk_bytes_provisioned, + virtual_provisioning_collection.virtual_disk_bytes_provisioned, disk_size.to_bytes() as i64 ); } diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 0fc8646d358..0b4c6230f87 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -530,24 +530,24 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.cpus_provisioned, 0); - assert_eq!(virtual_resource_provisioning.ram_provisioned, 0); + assert_eq!(virtual_provisioning_collection.cpus_provisioned, 0); + assert_eq!(virtual_provisioning_collection.ram_provisioned, 0); // Create an instance. let instance_url = format!("{}/just-rainsticks", url_instances); create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, "just-rainsticks") .await; - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.cpus_provisioned, 4); + assert_eq!(virtual_provisioning_collection.cpus_provisioned, 4); assert_eq!( - virtual_resource_provisioning.ram_provisioned, + virtual_provisioning_collection.ram_provisioned, i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(), ); @@ -564,13 +564,13 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { // However, for implementation reasons, this is complicated (we have a // tendency to update the runtime without checking the prior state, which // makes edge-triggered behavior trickier to notice). 
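// (Editorial note.) The double-counting concern in the comment above is what
// the DoUpdate guards in the provisioning CTE address: an insert only charges
// the collections when no virtual_provisioning_resource row exists for the id
// yet, and a delete only credits them when one does. Roughly (the final
// comparison is inferred from the builder shown earlier in this series):
//
//     let not_allocated = dsl::virtual_provisioning_resource
//         .find(id)
//         .count()
//         .single_value()
//         .assume_not_null()
//         .eq(0);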
- let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.cpus_provisioned, 4); + assert_eq!(virtual_provisioning_collection.cpus_provisioned, 4); assert_eq!( - virtual_resource_provisioning.ram_provisioned, + virtual_provisioning_collection.ram_provisioned, i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(), ); @@ -581,12 +581,12 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let virtual_resource_provisioning = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + let virtual_provisioning_collection = datastore + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!(virtual_resource_provisioning.cpus_provisioned, 0); - assert_eq!(virtual_resource_provisioning.ram_provisioned, 0); + assert_eq!(virtual_provisioning_collection.cpus_provisioned, 0); + assert_eq!(virtual_provisioning_collection.ram_provisioned, 0); } #[nexus_test] diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index a8a884d66d0..b39722675f8 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -324,7 +324,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); let provision = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); assert_eq!( @@ -354,7 +354,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { assert_eq!(snapshot.disk_id, base_disk.identity.id); assert_eq!(snapshot.size, base_disk.size); let provision = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); assert_eq!( @@ -389,7 +389,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .unwrap(); let provision = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); assert_eq!( @@ -413,7 +413,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .unwrap(); let provision = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); assert_eq!( @@ -429,7 +429,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to delete disk"); let provision = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); assert_eq!( @@ -445,7 +445,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to delete disk"); let provision = datastore - .virtual_resource_provisioning_get(&opctx, project_id) + .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); assert_eq!(provision.virtual_disk_bytes_provisioned, 0); From 206572d2f07bd714c7527e5fda07cce078c26bb7 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 15 Dec 2022 13:20:56 -0500 Subject: [PATCH 41/80] Remove fleet from DB --- common/src/sql/dbinit.sql | 12 ---- nexus/db-model/src/fleet.rs | 28 -------- 
nexus/db-model/src/lib.rs | 2 - .../virtual_provisioning_collection_update.rs | 7 -- nexus/db-model/src/rack.rs | 4 +- nexus/db-model/src/schema.rs | 10 --- nexus/db-model/src/silo.rs | 12 +--- nexus/src/app/rack.rs | 3 +- nexus/src/db/datastore/fleet.rs | 69 ------------------- nexus/src/db/datastore/mod.rs | 24 +------ nexus/src/db/datastore/silo.rs | 6 +- nexus/src/db/fixed_data/silo.rs | 1 - .../virtual_provisioning_collection_update.rs | 45 +++--------- nexus/src/populate.rs | 42 +---------- 14 files changed, 18 insertions(+), 247 deletions(-) delete mode 100644 nexus/db-model/src/fleet.rs delete mode 100644 nexus/src/db/datastore/fleet.rs diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 9c3226f1237..e89b8245924 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -39,14 +39,6 @@ CREATE DATABASE omicron; CREATE USER omicron; ALTER DEFAULT PRIVILEGES GRANT INSERT, SELECT, UPDATE, DELETE ON TABLES to omicron; --- Fleet: Represents a collection of racks -CREATE TABLE omicron.public.fleet ( - /* Identity metadata (asset) */ - id UUID PRIMARY KEY, - time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL -); - /* * Racks */ @@ -56,9 +48,6 @@ CREATE TABLE omicron.public.rack ( time_created TIMESTAMPTZ NOT NULL, time_modified TIMESTAMPTZ NOT NULL, - -- The fleet to which this rack belongs - fleet_id UUID NOT NULL, - /* * Identifies if rack management has been transferred from RSS -> Nexus. * If "false", RSS is still managing sleds, services, and DNS records. @@ -380,7 +369,6 @@ CREATE TABLE omicron.public.silo ( time_modified TIMESTAMPTZ NOT NULL, time_deleted TIMESTAMPTZ, - fleet_id UUID NOT NULL, discoverable BOOL NOT NULL, authentication_mode omicron.public.authentication_mode NOT NULL, user_provision_type omicron.public.user_provision_type NOT NULL, diff --git a/nexus/db-model/src/fleet.rs b/nexus/db-model/src/fleet.rs deleted file mode 100644 index 56f642f7f7f..00000000000 --- a/nexus/db-model/src/fleet.rs +++ /dev/null @@ -1,28 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -use crate::schema::fleet; -use db_macros::Asset; -use nexus_types::external_api::views; -use uuid::Uuid; - -/// Information about a fleet -#[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset)] -#[diesel(table_name = fleet)] -pub struct Fleet { - #[diesel(embed)] - pub identity: FleetIdentity, -} - -impl Fleet { - pub fn new(id: Uuid) -> Self { - Self { identity: FleetIdentity::new(id) } - } -} - -impl From for views::Fleet { - fn from(fleet: Fleet) -> Self { - Self { identity: views::AssetIdentityMetadata::from(&fleet) } - } -} diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index e5770aef67b..65d29caa1d4 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -20,7 +20,6 @@ mod digest; mod disk; mod disk_state; mod external_ip; -mod fleet; mod generation; mod global_image; mod identity_provider; @@ -95,7 +94,6 @@ pub use digest::*; pub use disk::*; pub use disk_state::*; pub use external_ip::*; -pub use fleet::*; pub use generation::*; pub use global_image::*; pub use identity_provider::*; diff --git a/nexus/db-model/src/queries/virtual_provisioning_collection_update.rs b/nexus/db-model/src/queries/virtual_provisioning_collection_update.rs index b2893347374..388737bf965 100644 --- a/nexus/db-model/src/queries/virtual_provisioning_collection_update.rs +++ b/nexus/db-model/src/queries/virtual_provisioning_collection_update.rs @@ -23,12 +23,6 @@ table! { } } -table! { - parent_fleet { - id -> Uuid, - } -} - table! { all_collections { id -> Uuid, @@ -48,7 +42,6 @@ diesel::allow_tables_to_appear_in_same_query!( virtual_provisioning_collection, parent_org, parent_silo, - parent_fleet, all_collections, do_update, ); diff --git a/nexus/db-model/src/rack.rs b/nexus/db-model/src/rack.rs index ff39773b775..ec11474b330 100644 --- a/nexus/db-model/src/rack.rs +++ b/nexus/db-model/src/rack.rs @@ -13,16 +13,14 @@ use uuid::Uuid; pub struct Rack { #[diesel(embed)] pub identity: RackIdentity, - pub fleet_id: Uuid, pub initialized: bool, pub tuf_base_url: Option, } impl Rack { - pub fn new(id: Uuid, fleet_id: Uuid) -> Self { + pub fn new(id: Uuid) -> Self { Self { identity: RackIdentity::new(id), - fleet_id, initialized: false, tuf_base_url: None, } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 899d1b10700..464ca49188f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -197,7 +197,6 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable, - fleet_id -> Uuid, discoverable -> Bool, authentication_mode -> crate::AuthenticationModeEnum, user_provision_type -> crate::UserProvisionTypeEnum, @@ -360,20 +359,11 @@ table! { } } -table! { - fleet (id) { - id -> Uuid, - time_created -> Timestamptz, - time_modified -> Timestamptz, - } -} - table! { rack (id) { id -> Uuid, time_created -> Timestamptz, time_modified -> Timestamptz, - fleet_id -> Uuid, initialized -> Bool, tuf_base_url -> Nullable, } diff --git a/nexus/db-model/src/silo.rs b/nexus/db-model/src/silo.rs index 7dc4bb25056..1644a811f9f 100644 --- a/nexus/db-model/src/silo.rs +++ b/nexus/db-model/src/silo.rs @@ -85,7 +85,6 @@ pub struct Silo { #[diesel(embed)] identity: SiloIdentity, - pub fleet_id: Uuid, pub discoverable: bool, pub authentication_mode: AuthenticationMode, @@ -97,18 +96,13 @@ pub struct Silo { impl Silo { /// Creates a new database Silo object. 
- pub fn new(params: params::SiloCreate, fleet_id: Uuid) -> Self { - Self::new_with_id(Uuid::new_v4(), params, fleet_id) + pub fn new(params: params::SiloCreate) -> Self { + Self::new_with_id(Uuid::new_v4(), params) } - pub fn new_with_id( - id: Uuid, - params: params::SiloCreate, - fleet_id: Uuid, - ) -> Self { + pub fn new_with_id(id: Uuid, params: params::SiloCreate) -> Self { Self { identity: SiloIdentity::new(id, params.identity), - fleet_id, discoverable: params.discoverable, authentication_mode: params .identity_mode diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 524ee0ca2a7..9aaac5c7dea 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -43,10 +43,9 @@ impl super::Nexus { &self, opctx: &OpContext, rack_id: Uuid, - fleet_id: Uuid, ) -> Result<(), Error> { self.datastore() - .rack_insert(opctx, &db::model::Rack::new(rack_id, fleet_id)) + .rack_insert(opctx, &db::model::Rack::new(rack_id)) .await?; Ok(()) } diff --git a/nexus/src/db/datastore/fleet.rs b/nexus/src/db/datastore/fleet.rs deleted file mode 100644 index 42b43d4f5be..00000000000 --- a/nexus/src/db/datastore/fleet.rs +++ /dev/null @@ -1,69 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! [`DataStore`] methods on [`Fleet`]s. - -use super::DataStore; -use crate::authz; -use crate::context::OpContext; -use crate::db; -use crate::db::error::public_error_from_diesel_pool; -use crate::db::error::ErrorHandler; -use crate::db::identity::Asset; -use crate::db::model::Fleet; -use crate::db::pagination::paginated; -use async_bb8_diesel::AsyncRunQueryDsl; -use diesel::prelude::*; -use diesel::upsert::excluded; -use omicron_common::api::external::DataPageParams; -use omicron_common::api::external::Error; -use omicron_common::api::external::ListResultVec; -use omicron_common::api::external::ResourceType; -use uuid::Uuid; - -impl DataStore { - pub async fn fleet_list( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - use db::schema::fleet::dsl; - paginated(dsl::fleet, dsl::id, pagparams) - .select(Fleet::as_select()) - .load_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - /// Stores a new fleet in the database. - /// - /// This function is a no-op if the fleet already exists. - pub async fn fleet_insert( - &self, - opctx: &OpContext, - fleet: &Fleet, - ) -> Result { - use db::schema::fleet::dsl; - - diesel::insert_into(dsl::fleet) - .values(fleet.clone()) - .on_conflict(dsl::id) - .do_update() - // This is a no-op, since we conflicted on the ID. - .set(dsl::id.eq(excluded(dsl::id))) - .returning(Fleet::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Fleet, - &fleet.id().to_string(), - ), - ) - }) - } -} diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index 803edb5851f..a07caef5d50 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -46,7 +46,6 @@ mod dataset; mod device_auth; mod disk; mod external_ip; -mod fleet; mod global_image; mod identity_provider; mod instance; @@ -262,7 +261,6 @@ mod test { use crate::db::model::BlockSize; use crate::db::model::Dataset; use crate::db::model::ExternalIp; - use crate::db::model::Fleet; use crate::db::model::Rack; use crate::db::model::Region; use crate::db::model::Service; @@ -1025,7 +1023,7 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a Rack, insert it into the DB. - let rack = Rack::new(Uuid::new_v4(), *db::fixed_data::FLEET_ID); + let rack = Rack::new(Uuid::new_v4()); let result = datastore.rack_insert(&opctx, &rack).await.unwrap(); assert_eq!(result.id(), rack.id()); assert_eq!(result.initialized, false); @@ -1053,26 +1051,6 @@ mod test { logctx.cleanup_successful(); } - #[tokio::test] - async fn test_fleet_initialize_is_idempotent() { - let logctx = dev::test_setup_log("test_fleet_initialize_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // Create a Fleet, insert it into the DB. - let fleet = Fleet::new(Uuid::new_v4()); - let result = datastore.fleet_insert(&opctx, &fleet).await.unwrap(); - assert_eq!(result.id(), fleet.id()); - - // Re-insert the Fleet (check for idempotency). - let result2 = datastore.fleet_insert(&opctx, &fleet).await.unwrap(); - assert_eq!(result2.id(), fleet.id()); - assert_eq!(result2.time_modified(), result.time_modified()); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - #[tokio::test] async fn test_table_scan() { let logctx = dev::test_setup_log("test_table_scan"); diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index e3d3a3eb559..6fa042b0e1b 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -91,11 +91,7 @@ impl DataStore { let silo_create_query = Self::silo_create_query( opctx, - db::model::Silo::new_with_id( - silo_id, - new_silo_params.clone(), - *db::fixed_data::FLEET_ID, - ), + db::model::Silo::new_with_id(silo_id, new_silo_params.clone()), ) .await?; diff --git a/nexus/src/db/fixed_data/silo.rs b/nexus/src/db/fixed_data/silo.rs index f633fa76168..dea6477a1be 100644 --- a/nexus/src/db/fixed_data/silo.rs +++ b/nexus/src/db/fixed_data/silo.rs @@ -22,6 +22,5 @@ lazy_static! 
{ identity_mode: shared::SiloIdentityMode::LocalOnly, admin_group_name: None, }, - *super::FLEET_ID, ); } diff --git a/nexus/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/src/db/queries/virtual_provisioning_collection_update.rs index 8b83ce34a1e..3b7345c0c10 100644 --- a/nexus/src/db/queries/virtual_provisioning_collection_update.rs +++ b/nexus/src/db/queries/virtual_provisioning_collection_update.rs @@ -20,7 +20,7 @@ use diesel::{ NullableExpressionMethods, QueryDsl, RunQueryDsl, SelectableHelper, }; use nexus_db_model::queries::virtual_provisioning_collection_update::{ - all_collections, do_update, parent_fleet, parent_org, parent_silo, + all_collections, do_update, parent_org, parent_silo, }; #[derive(Subquery, QueryId)] @@ -67,29 +67,6 @@ impl ParentSilo { } } -#[derive(Subquery, QueryId)] -#[subquery(name = parent_fleet)] -struct ParentFleet { - query: Box>, -} - -impl ParentFleet { - fn new(parent_silo: &ParentSilo) -> Self { - use crate::db::schema::silo::dsl; - Self { - query: Box::new( - dsl::silo - .filter(dsl::id.eq_any( - parent_silo.query_source().select(parent_silo::id), - )) - .select((ExpressionAlias::new::( - dsl::fleet_id, - ),)), - ), - } - } -} - #[derive(Subquery, QueryId)] #[subquery(name = all_collections)] struct AllCollections { @@ -101,15 +78,15 @@ impl AllCollections { project_id: uuid::Uuid, parent_org: &ParentOrg, parent_silo: &ParentSilo, - parent_fleet: &ParentFleet, + fleet_id: uuid::Uuid, ) -> Self { + let project_id = project_id.into_sql::(); + let fleet_id = fleet_id.into_sql::(); Self { query: Box::new( diesel::select((ExpressionAlias::new::< all_collections::dsl::id, - >( - project_id.into_sql::() - ),)) + >(project_id),)) .union(parent_org.query_source().select(( ExpressionAlias::new::( parent_org::id, @@ -120,11 +97,9 @@ impl AllCollections { parent_silo::id, ), ))) - .union(parent_fleet.query_source().select(( - ExpressionAlias::new::( - parent_fleet::id, - ), - ))), + .union(diesel::select((ExpressionAlias::new::< + all_collections::dsl::id, + >(fleet_id),))), ), } } @@ -289,12 +264,11 @@ impl VirtualProvisioningCollectionUpdate { { let parent_org = ParentOrg::new(project_id); let parent_silo = ParentSilo::new(&parent_org); - let parent_fleet = ParentFleet::new(&parent_silo); let all_collections = AllCollections::new( project_id, &parent_org, &parent_silo, - &parent_fleet, + *crate::db::fixed_data::FLEET_ID, ); let updated_collections = UpdatedProvisions::new(&all_collections, &do_update, values); @@ -310,7 +284,6 @@ impl VirtualProvisioningCollectionUpdate { let cte = CteBuilder::new() .add_subquery(parent_org) .add_subquery(parent_silo) - .add_subquery(parent_fleet) .add_subquery(all_collections) .add_subquery(do_update) .add_subquery(update) diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 2c9bae5de4a..8c201fba168 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -266,37 +266,6 @@ impl Populator for PopulateSiloUserRoleAssignments { } } -#[derive(Debug)] -struct PopulateFleet; -impl Populator for PopulateFleet { - fn populate<'a, 'b>( - &self, - opctx: &'a OpContext, - datastore: &'a DataStore, - _args: &'a PopulateArgs, - ) -> BoxFuture<'b, Result<(), Error>> - where - 'a: 'b, - { - async { - let id = *db::fixed_data::FLEET_ID; - datastore.fleet_insert(opctx, &db::model::Fleet::new(id)).await?; - datastore - .virtual_provisioning_collection_create( - opctx, - db::model::VirtualProvisioningCollection::new( - id, - db::model::CollectionTypeProvisioned::Fleet, - ), - ) - .await?; - - Ok(()) - } - 
.boxed() - } -} - #[derive(Debug)] struct PopulateRack; impl Populator for PopulateRack { @@ -311,13 +280,7 @@ impl Populator for PopulateRack { { async { datastore - .rack_insert( - opctx, - &db::model::Rack::new( - args.rack_id, - *db::fixed_data::FLEET_ID, - ), - ) + .rack_insert(opctx, &db::model::Rack::new(args.rack_id)) .await?; let params = params::IpPoolCreate { @@ -342,14 +305,13 @@ impl Populator for PopulateRack { } lazy_static! { - static ref ALL_POPULATORS: [&'static dyn Populator; 8] = [ + static ref ALL_POPULATORS: [&'static dyn Populator; 7] = [ &PopulateBuiltinUsers, &PopulateBuiltinRoles, &PopulateBuiltinRoleAssignments, &PopulateBuiltinSilos, &PopulateSiloUsers, &PopulateSiloUserRoleAssignments, - &PopulateFleet, &PopulateRack, ]; } From 82c637853f44a141222ce288501638a9a847e668 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 15 Dec 2022 13:38:13 -0500 Subject: [PATCH 42/80] Still need to insert collection record for fleet --- nexus/src/populate.rs | 33 ++++++++++++++++++++++++++++++++- openapi/nexus.json | 18 ++++++------------ 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index 8c201fba168..ce6f384ac5c 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -266,6 +266,36 @@ impl Populator for PopulateSiloUserRoleAssignments { } } +#[derive(Debug)] +struct PopulateFleet; +impl Populator for PopulateFleet { + fn populate<'a, 'b>( + &self, + opctx: &'a OpContext, + datastore: &'a DataStore, + _args: &'a PopulateArgs, + ) -> BoxFuture<'b, Result<(), Error>> + where + 'a: 'b, + { + async { + let id = *db::fixed_data::FLEET_ID; + datastore + .virtual_provisioning_collection_create( + opctx, + db::model::VirtualProvisioningCollection::new( + id, + db::model::CollectionTypeProvisioned::Fleet, + ), + ) + .await?; + + Ok(()) + } + .boxed() + } +} + #[derive(Debug)] struct PopulateRack; impl Populator for PopulateRack { @@ -305,13 +335,14 @@ impl Populator for PopulateRack { } lazy_static! 
{ - static ref ALL_POPULATORS: [&'static dyn Populator; 7] = [ + static ref ALL_POPULATORS: [&'static dyn Populator; 8] = [ &PopulateBuiltinUsers, &PopulateBuiltinRoles, &PopulateBuiltinRoleAssignments, &PopulateBuiltinSilos, &PopulateSiloUsers, &PopulateSiloUserRoleAssignments, + &PopulateFleet, &PopulateRack, ]; } diff --git a/openapi/nexus.json b/openapi/nexus.json index da56d815754..f68a1a3d122 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -6290,8 +6290,7 @@ "required": true, "schema": { "$ref": "#/components/schemas/SystemMetricName" - }, - "style": "simple" + } }, { "in": "query", @@ -6300,8 +6299,7 @@ "schema": { "type": "string", "format": "date-time" - }, - "style": "form" + } }, { "in": "query", @@ -6311,8 +6309,7 @@ "schema": { "type": "string", "format": "uuid" - }, - "style": "form" + } }, { "in": "query", @@ -6323,8 +6320,7 @@ "type": "integer", "format": "uint32", "minimum": 1 - }, - "style": "form" + } }, { "in": "query", @@ -6333,8 +6329,7 @@ "schema": { "nullable": true, "type": "string" - }, - "style": "form" + } }, { "in": "query", @@ -6343,8 +6338,7 @@ "schema": { "type": "string", "format": "date-time" - }, - "style": "form" + } } ], "responses": { From e398fe187bd7b02bc15c564cc081366592be3ea2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 15 Dec 2022 14:20:05 -0500 Subject: [PATCH 43/80] Comments, distinguish between disk and snapshots --- common/src/sql/dbinit.sql | 9 ++ .../src/virtual_provisioning_resource.rs | 2 + nexus/src/app/sagas/snapshot_create.rs | 4 +- nexus/src/db/datastore/mod.rs | 1 + .../virtual_provisioning_collection.rs | 111 ++++++++++++++++-- .../virtual_provisioning_collection_update.rs | 7 +- 6 files changed, 121 insertions(+), 13 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index e89b8245924..c43caa44f5f 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -156,6 +156,15 @@ CREATE TABLE omicron.public.virtual_provisioning_collection ( -- - Disks -- - Instances -- - Snapshots +-- +-- NOTE: You might think to yourself: "This table looks an awful lot like +-- the 'virtual_provisioning_collection' table, could they be condensed into +-- a single table?" +-- The answer to this question is unfortunately: "No". We use CTEs to both +-- UPDATE the collection table while INSERTing rows in the resource table, and +-- this would not be allowed if they came from the same table due to: +-- https://www.cockroachlabs.com/docs/v22.2/known-limitations#statements-containing-multiple-modification-subqueries-of-the-same-table-are-disallowed +-- However, by using separate tables, the CTE is able to function correctly. CREATE TABLE omicron.public.virtual_provisioning_resource ( -- Should match the UUID of the corresponding collection. 
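-- (Editorial sketch of the limitation cited in the NOTE above.) With a single
-- combined table, one statement would have to both INSERT the per-resource
-- row and UPDATE the rolled-up counters, i.e. two modification subqueries of
-- the same table inside one CTE:
--
--     WITH ins AS (INSERT INTO t ... RETURNING *),
--          upd AS (UPDATE t SET ... RETURNING *)
--     SELECT * FROM upd;
--
-- CockroachDB rejects that form, so the resource and collection rows live in
-- separate tables and the CTE modifies each table exactly once.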
id UUID PRIMARY KEY, diff --git a/nexus/db-model/src/virtual_provisioning_resource.rs b/nexus/db-model/src/virtual_provisioning_resource.rs index 1ed9c4227d2..9d5f7a8cba8 100644 --- a/nexus/db-model/src/virtual_provisioning_resource.rs +++ b/nexus/db-model/src/virtual_provisioning_resource.rs @@ -9,6 +9,7 @@ use uuid::Uuid; pub enum ResourceTypeProvisioned { Instance, Disk, + Snapshot, } impl std::fmt::Display for ResourceTypeProvisioned { @@ -16,6 +17,7 @@ impl std::fmt::Display for ResourceTypeProvisioned { match self { ResourceTypeProvisioned::Instance => write!(f, "instance"), ResourceTypeProvisioned::Disk => write!(f, "disk"), + ResourceTypeProvisioned::Snapshot => write!(f, "snapshot"), } } } diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 7d0b49ea080..9a3c59cd1bf 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -551,7 +551,7 @@ async fn ssc_account_space( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_provisioning_collection_insert_disk( + .virtual_provisioning_collection_insert_snapshot( &opctx, snapshot_created.id(), params.project_id, @@ -580,7 +580,7 @@ async fn ssc_account_space_undo( let opctx = OpContext::for_saga_action(&sagactx, ¶ms.serialized_authn); osagactx .datastore() - .virtual_provisioning_collection_delete_disk( + .virtual_provisioning_collection_delete_snapshot( &opctx, snapshot_created.id(), params.project_id, diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index a07caef5d50..4757ac2175c 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -72,6 +72,7 @@ mod volume; mod vpc; mod zpool; +pub use virtual_provisioning_collection::StorageType; pub use volume::CrucibleResources; // Number of unique datasets required to back a region. diff --git a/nexus/src/db/datastore/virtual_provisioning_collection.rs b/nexus/src/db/datastore/virtual_provisioning_collection.rs index a7709d83c17..64dc3f85156 100644 --- a/nexus/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/src/db/datastore/virtual_provisioning_collection.rs @@ -121,6 +121,27 @@ impl oximeter::Producer for Producer { } } +/// The types of resources which can consume storage space. +pub enum StorageType { + Disk, + Snapshot, +} + +impl From for crate::db::model::ResourceTypeProvisioned { + fn from( + storage_type: StorageType, + ) -> crate::db::model::ResourceTypeProvisioned { + match storage_type { + StorageType::Disk => { + crate::db::model::ResourceTypeProvisioned::Disk + } + StorageType::Snapshot => { + crate::db::model::ResourceTypeProvisioned::Snapshot + } + } + } +} + impl DataStore { /// Create a [`VirtualProvisioningCollection`] object. pub async fn virtual_provisioning_collection_create( @@ -220,7 +241,6 @@ impl DataStore { // I think we just need to validate that the model exists when we make these // calls? Maybe it could be an optional helper? - /// Transitively updates all provisioned disk provisions from project -> fleet. 
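// (Editorial sketch.) After this patch the public surface fans out by
// resource kind while sharing one storage path; the wrappers reduce to:
//
//     // Disks and snapshots both charge virtual_disk_bytes_provisioned,
//     // but record a distinct ResourceTypeProvisioned via StorageType.
//     self.virtual_provisioning_collection_insert_storage(
//         opctx, id, project_id, disk_byte_diff, StorageType::Snapshot,
//     ).await
//
// Deletion needs no StorageType: the CTE removes the resource row by id and
// applies the (negative) byte delta, so the kind recorded at insert time no
// longer matters.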
pub async fn virtual_provisioning_collection_insert_disk( &self, opctx: &OpContext, @@ -228,20 +248,59 @@ impl DataStore { project_id: Uuid, disk_byte_diff: i64, ) -> Result, Error> { - let provisions = VirtualProvisioningCollectionUpdate::new_insert_disk( + self.virtual_provisioning_collection_insert_storage( + opctx, id, + project_id, disk_byte_diff, + StorageType::Disk, + ) + .await + } + + pub async fn virtual_provisioning_collection_insert_snapshot( + &self, + opctx: &OpContext, + id: Uuid, + project_id: Uuid, + disk_byte_diff: i64, + ) -> Result, Error> { + self.virtual_provisioning_collection_insert_storage( + opctx, + id, project_id, + disk_byte_diff, + StorageType::Snapshot, ) - .get_results_async(self.pool_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + } + + /// Transitively updates all provisioned disk provisions from project -> fleet. + async fn virtual_provisioning_collection_insert_storage( + &self, + opctx: &OpContext, + id: Uuid, + project_id: Uuid, + disk_byte_diff: i64, + storage_type: StorageType, + ) -> Result, Error> { + let provisions = + VirtualProvisioningCollectionUpdate::new_insert_storage( + id, + disk_byte_diff, + project_id, + storage_type, + ) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool(e, ErrorHandler::Server) + })?; self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions); Ok(provisions) } - /// Transitively updates all provisioned disk provisions from project -> fleet. pub async fn virtual_provisioning_collection_delete_disk( &self, opctx: &OpContext, @@ -249,14 +308,50 @@ impl DataStore { project_id: Uuid, disk_byte_diff: i64, ) -> Result, Error> { - let provisions = VirtualProvisioningCollectionUpdate::new_delete_disk( + self.virtual_provisioning_collection_delete_storage( + opctx, id, + project_id, disk_byte_diff, + ) + .await + } + + pub async fn virtual_provisioning_collection_delete_snapshot( + &self, + opctx: &OpContext, + id: Uuid, + project_id: Uuid, + disk_byte_diff: i64, + ) -> Result, Error> { + self.virtual_provisioning_collection_delete_storage( + opctx, + id, project_id, + disk_byte_diff, ) - .get_results_async(self.pool_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + } + + // Transitively updates all provisioned disk provisions from project -> fleet. + async fn virtual_provisioning_collection_delete_storage( + &self, + opctx: &OpContext, + id: Uuid, + project_id: Uuid, + disk_byte_diff: i64, + ) -> Result, Error> { + let provisions = + VirtualProvisioningCollectionUpdate::new_delete_storage( + id, + disk_byte_diff, + project_id, + ) + .get_results_async(self.pool_authorized(opctx).await?) 
+ .await + .map_err(|e| { + public_error_from_diesel_pool(e, ErrorHandler::Server) + })?; self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions); Ok(provisions) diff --git a/nexus/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/src/db/queries/virtual_provisioning_collection_update.rs index 3b7345c0c10..1aa6f0d16fe 100644 --- a/nexus/src/db/queries/virtual_provisioning_collection_update.rs +++ b/nexus/src/db/queries/virtual_provisioning_collection_update.rs @@ -293,16 +293,17 @@ impl VirtualProvisioningCollectionUpdate { Self { cte } } - pub fn new_insert_disk( + pub fn new_insert_storage( id: uuid::Uuid, disk_byte_diff: i64, project_id: uuid::Uuid, + storage_type: crate::db::datastore::StorageType, ) -> Self { use virtual_provisioning_collection::dsl as collection_dsl; use virtual_provisioning_resource::dsl as resource_dsl; let mut provision = - VirtualProvisioningResource::new(id, ResourceTypeProvisioned::Disk); + VirtualProvisioningResource::new(id, storage_type.into()); provision.virtual_disk_bytes_provisioned = disk_byte_diff; Self::apply_update( @@ -326,7 +327,7 @@ impl VirtualProvisioningCollectionUpdate { ) } - pub fn new_delete_disk( + pub fn new_delete_storage( id: uuid::Uuid, disk_byte_diff: i64, project_id: uuid::Uuid, From 29edafd75b2a4a19381f16ad999dfc60d9114355 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Thu, 15 Dec 2022 19:13:31 -0500 Subject: [PATCH 44/80] Just starting to make snapshot delete a saga --- nexus/src/app/saga.rs | 9 ++ nexus/src/app/sagas/mod.rs | 4 + nexus/src/app/sagas/snapshot_delete.rs | 154 +++++++++++++++++++++++++ 3 files changed, 167 insertions(+) create mode 100644 nexus/src/app/sagas/snapshot_delete.rs diff --git a/nexus/src/app/saga.rs b/nexus/src/app/saga.rs index 256c3a73290..e12d32518ce 100644 --- a/nexus/src/app/saga.rs +++ b/nexus/src/app/saga.rs @@ -101,6 +101,15 @@ impl super::Nexus { ) -> Result { // Construct the context necessary to execute this saga. let saga_id = SagaId(Uuid::new_v4()); + + self.create_runnable_saga_with_id(dag, saga_id).await + } + + pub async fn create_runnable_saga_with_id( + self: &Arc, + dag: SagaDag, + saga_id: SagaId, + ) -> Result { let saga_logger = self.log.new(o!( "saga_name" => dag.saga_name().to_string(), "saga_id" => saga_id.to_string() diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index 3fdba98c73a..c77bcf227d7 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -24,6 +24,7 @@ pub mod disk_delete; pub mod instance_create; pub mod instance_migrate; pub mod snapshot_create; +pub mod snapshot_delete; pub mod volume_delete; pub mod volume_remove_rop; @@ -101,6 +102,9 @@ fn make_action_registry() -> ActionRegistry { ::register_actions( &mut registry, ); + ::register_actions( + &mut registry, + ); ::register_actions( &mut registry, ); diff --git a/nexus/src/app/sagas/snapshot_delete.rs b/nexus/src/app/sagas/snapshot_delete.rs new file mode 100644 index 00000000000..d6588e68e40 --- /dev/null +++ b/nexus/src/app/sagas/snapshot_delete.rs @@ -0,0 +1,154 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
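// (Editorial overview of the skeleton below; all four action bodies are
// still todo!() at this point in the series.) The DAG mirrors how the saga
// is expected to be launched once disk.rs is converted -- compare the next
// patch, which wires this up from snapshot deletion:
//
//     let saga_params = sagas::snapshot_delete::Params {
//         serialized_authn: authn::saga::Serialized::for_opctx(opctx),
//         snapshot_id, project_id, disk_id, create_params,
//     };
//     nexus
//         .execute_saga::<sagas::snapshot_delete::SagaSnapshotDelete>(
//             saga_params,
//         )
//         .await?;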
use super::{
    ActionRegistry, NexusActionContext, NexusSaga, SagaInitError,
    ACTION_GENERATE_ID,
};
use crate::app::sagas::NexusAction;
use crate::context::OpContext;
use crate::db::identity::{Asset, Resource};
use crate::external_api::params;
use crate::{authn, db};
use lazy_static::lazy_static;
use serde::Deserialize;
use serde::Serialize;
use std::sync::Arc;
use steno::new_action_noop_undo;
use steno::ActionError;
use steno::Node;
use uuid::Uuid;

// snapshot delete saga: input parameters

#[derive(Debug, Deserialize, Serialize)]
pub struct Params {
    pub serialized_authn: authn::saga::Serialized,
    pub snapshot_id: Uuid,
    pub project_id: Uuid,
    pub disk_id: Uuid,
    pub create_params: params::SnapshotCreate,
}

// snapshot delete saga: actions

lazy_static! {
    static ref DELETE_SNAPSHOT_RECORD: NexusAction = new_action_noop_undo(
        "snapshot-delete.delete-snapshot-record",
        ssd_delete_snapshot_record,
    );
    static ref SPACE_ACCOUNT: NexusAction = new_action_noop_undo(
        "snapshot-delete.account-space",
        ssd_account_space,
    );
    static ref DELETE_SOURCE_VOLUME: NexusAction = new_action_noop_undo(
        "snapshot-delete.delete-source-volume",
        ssd_delete_source_volume,
    );
    static ref DELETE_DESTINATION_VOLUME: NexusAction = new_action_noop_undo(
        "snapshot-delete.delete-destination-volume",
        ssd_delete_destination_volume,
    );
}

// snapshot delete saga: definition

#[derive(Debug)]
pub struct SagaSnapshotDelete;
impl NexusSaga for SagaSnapshotDelete {
    const NAME: &'static str = "snapshot-delete";
    type Params = Params;

    fn register_actions(registry: &mut ActionRegistry) {
        registry.register(Arc::clone(&*DELETE_SNAPSHOT_RECORD));
        registry.register(Arc::clone(&*SPACE_ACCOUNT));
        registry.register(Arc::clone(&*DELETE_SOURCE_VOLUME));
        registry.register(Arc::clone(&*DELETE_DESTINATION_VOLUME));
    }

    fn make_saga_dag(
        _params: &Self::Params,
        mut builder: steno::DagBuilder,
    ) -> Result<steno::Dag, SagaInitError> {
        // Generate IDs
        builder.append(Node::action(
            "delete_source_volume_saga_id",
            "GenerateSourceVolumeSagaId",
            ACTION_GENERATE_ID.as_ref(),
        ));
        builder.append(Node::action(
            "delete_destination_volume_saga_id",
            "GenerateDestinationVolumeSagaId",
            ACTION_GENERATE_ID.as_ref(),
        ));
        builder.append(Node::action(
            "deleted_snapshot",
            "DeleteSnapshotRecord",
            DELETE_SNAPSHOT_RECORD.as_ref(),
        ));
        builder.append(Node::action(
            "accounted_space",
            "SpaceAccount",
            SPACE_ACCOUNT.as_ref(),
        ));
        builder.append(Node::action(
            "deleted_source_volume_saga",
            "DeleteSourceVolumeSaga",
            DELETE_SOURCE_VOLUME.as_ref(),
        ));
        builder.append(Node::action(
            "deleted_destination_volume_saga",
            "DeleteDestinationVolumeSaga",
            DELETE_DESTINATION_VOLUME.as_ref(),
        ));

        Ok(builder.build()?)
    }
}

// snapshot delete saga: action implementations

async fn ssd_delete_snapshot_record(
    sagactx: NexusActionContext,
) -> Result<(), ActionError> {
    todo!();
}

async fn ssd_account_space(
    sagactx: NexusActionContext,
) -> Result<(), ActionError> {
    todo!();
    /*
    let osagactx = sagactx.user_data();
    let params = sagactx.saga_params::<Params>()?;

    let snapshot_created =
        sagactx.lookup::<db::model::Snapshot>("created_snapshot")?;
    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
    osagactx
        .datastore()
        .virtual_provisioning_collection_delete_snapshot(
            &opctx,
            snapshot_created.id(),
            params.project_id,
            // TODO: How many bytes? read while deleting?
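            // (Editorial sketch of one way the TODO above could resolve: the
            // snapshot's recorded size should be available from the record
            // deleted earlier in the saga, making the credit
            //
            //     -i64::try_from(snapshot.size.to_bytes())
            //         .map_err(|e| { ... })?,
            //
            // mirroring what the snapshot-create undo already does.)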
+ ) + .await + .map_err(ActionError::action_failed)?; + */ + Ok(()) +} + +async fn ssd_delete_source_volume( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + todo!(); +} + +async fn ssd_delete_destination_volume( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + todo!(); +} From f382a73a07dfb3de2dac3810f70312a98983fec3 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 26 Dec 2022 18:04:24 -0500 Subject: [PATCH 45/80] Patch tests, pull in some saga-ification PRs --- nexus/src/app/disk.rs | 36 ++--- nexus/src/app/sagas/disk_delete.rs | 2 +- nexus/src/app/sagas/snapshot_create.rs | 16 +-- nexus/src/app/sagas/snapshot_delete.rs | 182 ++++++++++++------------- nexus/src/app/sagas/volume_delete.rs | 32 +++-- 5 files changed, 127 insertions(+), 141 deletions(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 0768a9bef54..f15ce53621b 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -12,7 +12,6 @@ use crate::db; use crate::db::lookup::LookupPath; use crate::db::model::Name; use crate::external_api::params; -use nexus_types::identity::Resource; use omicron_common::api::external::ByteCount; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; @@ -501,10 +500,9 @@ impl super::Nexus { // (on-disk snapshots, running read-only downstairs) because disks // *could* still be using them (if the snapshot has not yet been turned // into a regular crucible volume). It will involve some sort of - // reference counting for volumes, and probably means this needs to - // instead be a saga. + // reference counting for volumes. - let (.., project, authz_snapshot, db_snapshot) = + let (.., authz_snapshot, db_snapshot) = LookupPath::new(opctx, &self.db_datastore) .organization_name(organization_name) .project_name(project_name) @@ -512,27 +510,15 @@ impl super::Nexus { .fetch() .await?; - // TODO: This should exist within a saga - self.db_datastore - .virtual_provisioning_collection_delete_disk( - &opctx, - db_snapshot.id(), - project.id(), - -i64::try_from(db_snapshot.size.to_bytes()).map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - })?, - ) - .await?; - - self.db_datastore - .project_delete_snapshot(opctx, &authz_snapshot, &db_snapshot) - .await?; - - // Kick off volume deletion saga(s) - self.volume_delete(opctx, db_snapshot.volume_id).await?; - self.volume_delete(opctx, db_snapshot.destination_volume_id).await?; + let saga_params = sagas::snapshot_delete::Params { + serialized_authn: authn::saga::Serialized::for_opctx(opctx), + authz_snapshot, + snapshot: db_snapshot, + }; + self.execute_saga::( + saga_params, + ) + .await?; Ok(()) } diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index d8355e75216..e87ebe4f309 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -28,7 +28,7 @@ pub struct Params { declare_saga_actions! { disk_delete; - DELETE_DISK_RECORD -> "volume_id" { + DELETE_DISK_RECORD -> "deleted_disk" { // TODO: See the comment on the "DeleteRegions" step, // we may want to un-delete the disk if we cannot remove // underlying regions. diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 714cb3e43f9..1f077caff7d 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -125,6 +125,7 @@ declare_saga_actions! 
{ snapshot_create; REGIONS_ALLOC -> "datasets_and_regions" { + ssc_alloc_regions + - ssc_alloc_regions_undo } REGIONS_ENSURE -> "regions_ensure" { + ssc_regions_ensure @@ -521,16 +522,13 @@ async fn ssc_account_space_undo( &opctx, snapshot_created.id(), params.project_id, - -i64::try_from(snapshot_created.size.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + -i64::try_from(snapshot_created.size.to_bytes()).map_err(|e| { + Error::internal_error(&format!( + "updating resource provisioning: {e}" + )) + })?, ) - .await - .map_err(ActionError::action_failed)?; + .await?; Ok(()) } diff --git a/nexus/src/app/sagas/snapshot_delete.rs b/nexus/src/app/sagas/snapshot_delete.rs index e9de0afcd00..9558d8f3647 100644 --- a/nexus/src/app/sagas/snapshot_delete.rs +++ b/nexus/src/app/sagas/snapshot_delete.rs @@ -2,56 +2,35 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{ - ActionRegistry, NexusActionContext, NexusSaga, SagaInitError, - ACTION_GENERATE_ID, -}; -use crate::app::sagas::NexusAction; -use crate::authn; -use crate::external_api::params; -use lazy_static::lazy_static; +use super::{ActionRegistry, NexusActionContext, NexusSaga}; +use crate::app::sagas; +use crate::app::sagas::declare_saga_actions; +use crate::context::OpContext; +use crate::db; +use crate::{authn, authz}; +use omicron_common::api::external::Error; use serde::Deserialize; use serde::Serialize; -use std::sync::Arc; -use steno::new_action_noop_undo; use steno::ActionError; use steno::Node; -use uuid::Uuid; - -// snapshot create saga: input parameters #[derive(Debug, Deserialize, Serialize)] pub struct Params { pub serialized_authn: authn::saga::Serialized, - pub snapshot_id: Uuid, - pub project_id: Uuid, - pub disk_id: Uuid, - pub create_params: params::SnapshotCreate, + pub authz_snapshot: authz::Snapshot, + pub snapshot: db::model::Snapshot, } -// snapshot create saga: actions - -lazy_static! { - static ref DELETE_SNAPSHOT_RECORD: NexusAction = new_action_noop_undo( - "snapshot-delete.delete-snapshot-record", - ssd_delete_snapshot_record, - ); - static ref SPACE_ACCOUNT: NexusAction = new_action_noop_undo( - "snapshot-delete.account-space", - ssd_account_space, - ); - static ref DELETE_SOURCE_VOLUME: NexusAction = new_action_noop_undo( - "snapshot-delete.delete-source-volume", - ssd_delete_source_volume, - ); - static ref DELETE_DESTINATION_VOLUME: NexusAction = new_action_noop_undo( - "snapshot-delete.delete-destination-volume", - ssd_delete_destination_volume, - ); +declare_saga_actions! 
{ + snapshot_delete; + DELETE_SNAPSHOT_RECORD -> "no_result1" { + + ssd_delete_snapshot_record + } + SPACE_ACCOUNT -> "no_result2" { + + ssd_account_space + } } -// snapshot delete saga: definition - #[derive(Debug)] pub struct SagaSnapshotDelete; impl NexusSaga for SagaSnapshotDelete { @@ -59,47 +38,63 @@ impl NexusSaga for SagaSnapshotDelete { type Params = Params; fn register_actions(registry: &mut ActionRegistry) { - registry.register(Arc::clone(&*DELETE_SNAPSHOT_RECORD)); - registry.register(Arc::clone(&*SPACE_ACCOUNT)); - registry.register(Arc::clone(&*DELETE_SOURCE_VOLUME)); - registry.register(Arc::clone(&*DELETE_DESTINATION_VOLUME)); + snapshot_delete_register_actions(registry); } fn make_saga_dag( - _params: &Self::Params, + params: &Self::Params, mut builder: steno::DagBuilder, - ) -> Result { - // Generate IDs - builder.append(Node::action( - "delete_source_volume_saga_id", - "GenerateSourceVolumeSagaId", - ACTION_GENERATE_ID.as_ref(), + ) -> Result { + builder.append(delete_snapshot_record_action()); + builder.append(space_account_action()); + + const DELETE_VOLUME_PARAMS: &'static str = "delete_volume_params"; + const DELETE_VOLUME_DESTINATION_PARAMS: &'static str = + "delete_volume_destination_params"; + + let volume_delete_params = sagas::volume_delete::Params { + serialized_authn: params.serialized_authn.clone(), + volume_id: params.snapshot.volume_id, + }; + builder.append(Node::constant( + DELETE_VOLUME_PARAMS, + serde_json::to_value(&volume_delete_params).map_err(|e| { + super::SagaInitError::SerializeError( + String::from("volume_id"), + e, + ) + })?, )); - builder.append(Node::action( - "delete_destination_volume_saga_id", - "GenerateDestinationVolumeSagaId", - ACTION_GENERATE_ID.as_ref(), - )); - builder.append(Node::action( - "deleted_snapshot", - "DeleteSnapshotRecord", - DELETE_SNAPSHOT_RECORD.as_ref(), - )); - builder.append(Node::action( - "accounted_space", - "SpaceAccount", - SPACE_ACCOUNT.as_ref(), + let volume_delete_params = sagas::volume_delete::Params { + serialized_authn: params.serialized_authn.clone(), + volume_id: params.snapshot.destination_volume_id, + }; + builder.append(Node::constant( + DELETE_VOLUME_DESTINATION_PARAMS, + serde_json::to_value(&volume_delete_params).map_err(|e| { + super::SagaInitError::SerializeError( + String::from("volume_id"), + e, + ) + })?, )); - builder.append(Node::action( - "deleted_source_volume_saga", - "DeleteSourceVolumeSaga", - DELETE_SOURCE_VOLUME.as_ref(), + + let make_volume_delete_dag = || { + let subsaga_builder = steno::DagBuilder::new(steno::SagaName::new( + sagas::volume_delete::SagaVolumeDelete::NAME, + )); + sagas::volume_delete::create_dag(subsaga_builder) + }; + builder.append(steno::Node::subsaga( + "delete_volume", + make_volume_delete_dag()?, + DELETE_VOLUME_PARAMS, )); - builder.append(Node::action( - "deleted_destination_volume_saga", - "DeleteDestinationVolumeSaga", - DELETE_DESTINATION_VOLUME.as_ref(), + builder.append(steno::Node::subsaga( + "delete_destination_volume", + make_volume_delete_dag()?, + DELETE_VOLUME_DESTINATION_PARAMS, )); Ok(builder.build()?) 
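
The rewritten `make_saga_dag` above shows steno's standard subsaga wiring: the subsaga's parameters are serialized into a `Node::constant`, and the `Node::subsaga` appended afterwards refers to that constant by node name. Reduced to a single subsaga (the hunk does this twice, once per volume), using the same names that appear there:

    let volume_delete_params = sagas::volume_delete::Params {
        serialized_authn: params.serialized_authn.clone(),
        volume_id: params.snapshot.volume_id,
    };
    // The constant node carries the serialized parameters...
    builder.append(Node::constant(
        "delete_volume_params",
        serde_json::to_value(&volume_delete_params).map_err(|e| {
            super::SagaInitError::SerializeError(String::from("volume_id"), e)
        })?,
    ));
    // ...and the subsaga node names it as its parameter source.
    let subsaga_builder = steno::DagBuilder::new(steno::SagaName::new(
        sagas::volume_delete::SagaVolumeDelete::NAME,
    ));
    builder.append(steno::Node::subsaga(
        "delete_volume",
        sagas::volume_delete::create_dag(subsaga_builder)?,
        "delete_volume_params",
    ));
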
@@ -111,42 +106,43 @@ impl NexusSaga for SagaSnapshotDelete {
 async fn ssd_delete_snapshot_record(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
-    todo!();
+    let osagactx = sagactx.user_data();
+    let params = sagactx.saga_params::<Params>()?;
+    let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
+
+    osagactx
+        .datastore()
+        .project_delete_snapshot(
+            &opctx,
+            &params.authz_snapshot,
+            &params.snapshot,
+        )
+        .await
+        .map_err(ActionError::action_failed)?;
+    Ok(())
 }
 
 async fn ssd_account_space(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
-    todo!();
-    /*
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
-
-    let snapshot_created =
-        sagactx.lookup::<db::model::Snapshot>("created_snapshot")?;
     let opctx = OpContext::for_saga_action(&sagactx, &params.serialized_authn);
     osagactx
         .datastore()
         .virtual_provisioning_collection_delete_snapshot(
             &opctx,
-            snapshot_created.id(),
-            params.project_id,
-            // TODO: How many bytes? read while deleting?
+            params.authz_snapshot.id(),
+            params.snapshot.project_id,
+            -i64::try_from(params.snapshot.size.to_bytes())
+                .map_err(|e| {
+                    Error::internal_error(&format!(
+                        "updating resource provisioning: {e}"
+                    ))
+                })
+                .map_err(ActionError::action_failed)?,
         )
         .await
         .map_err(ActionError::action_failed)?;
-    */
     Ok(())
 }
-
-async fn ssd_delete_source_volume(
-    sagactx: NexusActionContext,
-) -> Result<(), ActionError> {
-    todo!();
-}
-
-async fn ssd_delete_destination_volume(
-    sagactx: NexusActionContext,
-) -> Result<(), ActionError> {
-    todo!();
-}
diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs
index 496c4dda34b..df449f67f95 100644
--- a/nexus/src/app/sagas/volume_delete.rs
+++ b/nexus/src/app/sagas/volume_delete.rs
@@ -74,6 +74,23 @@ declare_saga_actions! {
 
 // volume delete saga: definition
 
+pub fn create_dag(
+    mut builder: steno::DagBuilder,
+) -> Result<steno::Dag, super::SagaInitError> {
+    builder.append(decrease_crucible_resource_count_action());
+    builder.append_parallel(vec![
+        // clean up top level regions for volume
+        delete_crucible_regions_action(),
+        // clean up snapshots no longer referenced by any volume
+        delete_crucible_snapshots_action(),
+    ]);
+    // clean up regions that were freed by deleting snapshots
+    builder.append(delete_freed_crucible_regions_action());
+    builder.append(hard_delete_volume_record_action());
+
+    Ok(builder.build()?)
+}
+
 #[derive(Debug)]
 pub struct SagaVolumeDelete;
 impl NexusSaga for SagaVolumeDelete {
@@ -86,20 +103,9 @@ impl NexusSaga for SagaVolumeDelete {
 
     fn make_saga_dag(
         _params: &Self::Params,
-        mut builder: steno::DagBuilder,
+        builder: steno::DagBuilder,
     ) -> Result<steno::Dag, SagaInitError> {
-        builder.append(decrease_crucible_resource_count_action());
-        builder.append_parallel(vec![
-            // clean up top level regions for volume
-            delete_crucible_regions_action(),
-            // clean up snapshots no longer referenced by any volume
-            delete_crucible_snapshots_action(),
-        ]);
-        // clean up regions that were freed by deleting snapshots
-        builder.append(delete_freed_crucible_regions_action());
-        builder.append(hard_delete_volume_record_action());
-
-        Ok(builder.build()?)
+ create_dag(builder) } } From 5f5f15e24ba689dc9e37848e6fc59bccdbf403f1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Dec 2022 14:29:06 -0500 Subject: [PATCH 46/80] Transactions, deleting collections, validate collection empty on delete --- .../src/virtual_provisioning_collection.rs | 6 ++ nexus/src/app/sagas/disk_create.rs | 2 - nexus/src/app/sagas/disk_delete.rs | 2 - nexus/src/app/sagas/instance_delete.rs | 2 +- nexus/src/app/sagas/snapshot_create.rs | 2 - nexus/src/db/datastore/organization.rs | 61 ++++++++++++------ nexus/src/db/datastore/silo.rs | 63 ++++++++++++------- .../virtual_provisioning_collection.rs | 35 ++++++++++- 8 files changed, 122 insertions(+), 51 deletions(-) diff --git a/nexus/db-model/src/virtual_provisioning_collection.rs b/nexus/db-model/src/virtual_provisioning_collection.rs index a18b8d8153a..def866d7a10 100644 --- a/nexus/db-model/src/virtual_provisioning_collection.rs +++ b/nexus/db-model/src/virtual_provisioning_collection.rs @@ -48,4 +48,10 @@ impl VirtualProvisioningCollection { ram_provisioned: 0, } } + + pub fn is_empty(&self) -> bool { + self.virtual_disk_bytes_provisioned == 0 + && self.cpus_provisioned == 0 + && self.ram_provisioned == 0 + } } diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 871454c3dd2..5ebc12d5c5a 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -244,7 +244,6 @@ async fn sdc_alloc_regions_undo( Ok(()) } -// TODO: Not yet idempotent async fn sdc_account_space( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -272,7 +271,6 @@ async fn sdc_account_space( Ok(()) } -// TODO: Not yet idempotent async fn sdc_account_space_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index e87ebe4f309..a405a2aa162 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -82,7 +82,6 @@ async fn sdd_delete_disk_record( Ok(disk) } -// TODO: Not yet idempotent async fn sdd_account_space( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -110,7 +109,6 @@ async fn sdd_account_space( Ok(()) } -// TODO: Not yet idempotent async fn sdd_account_space_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index c7a32b230d0..8a0b7329e3d 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -37,7 +37,7 @@ declare_saga_actions! 
{ DEALLOCATE_EXTERNAL_IP -> "no_result3" { + sid_deallocate_external_ip } - RESOURCES_ACCOUNT -> "no_reult4" { + RESOURCES_ACCOUNT -> "no_result4" { + sid_account_resources } } diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 1f077caff7d..de99054b1bb 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -477,7 +477,6 @@ async fn ssc_create_snapshot_record_undo( Ok(()) } -// TODO: Not yet idempotent async fn ssc_account_space( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -506,7 +505,6 @@ async fn ssc_account_space( Ok(()) } -// TODO: Not yet idempotent async fn ssc_account_space_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index a5ac7b49886..a30a1e70505 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -23,7 +23,7 @@ use crate::db::model::Silo; use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; use crate::external_api::params; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, PoolError}; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::CreateResult; @@ -135,27 +135,48 @@ impl DataStore { } let now = Utc::now(); - let updated_rows = diesel::update(dsl::organization) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_org.id())) - .filter(dsl::rcgen.eq(db_org.rcgen)) - .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_org), + + type TxnError = TransactionError; + self.pool_authorized(opctx) + .await? 
+ .transaction_async(|conn| async move { + let updated_rows = diesel::update(dsl::organization) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(authz_org.id())) + .filter(dsl::rcgen.eq(db_org.rcgen)) + .set(dsl::time_deleted.eq(now)) + .execute_async(&conn) + .await + .map_err(|e| { + public_error_from_diesel_pool( + PoolError::from(e), + ErrorHandler::NotFoundByResource(authz_org), + ) + })?; + + if updated_rows == 0 { + return Err(TxnError::CustomError(Error::InvalidRequest { + message: + "deletion failed due to concurrent modification" + .to_string(), + })); + } + + self.virtual_provisioning_collection_delete_on_connection( + &conn, + authz_org.id(), ) - })?; + .await?; - if updated_rows == 0 { - return Err(Error::InvalidRequest { - message: "deletion failed due to concurrent modification" - .to_string(), - }); - } - Ok(()) + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError(e) => e, + TxnError::Pool(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } + }) } pub async fn organizations_list_by_id( diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 6fa042b0e1b..9a80454231c 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -24,6 +24,7 @@ use crate::external_api::params; use crate::external_api::shared; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; +use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::CreateResult; @@ -158,7 +159,7 @@ impl DataStore { self.virtual_provisioning_collection_create_on_connection( &conn, VirtualProvisioningCollection::new( - DEFAULT_SILO.id(), + silo.id(), CollectionTypeProvisioned::Silo, ), ) @@ -258,28 +259,48 @@ impl DataStore { } let now = Utc::now(); - let updated_rows = diesel::update(silo::dsl::silo) - .filter(silo::dsl::time_deleted.is_null()) - .filter(silo::dsl::id.eq(id)) - .filter(silo::dsl::rcgen.eq(rcgen)) - .set(silo::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - if updated_rows == 0 { - return Err(Error::InvalidRequest { - message: "silo deletion failed due to concurrent modification" - .to_string(), - }); - } + type TxnError = TransactionError; + self.pool_authorized(opctx) + .await? + .transaction_async(|conn| async move { + let updated_rows = diesel::update(silo::dsl::silo) + .filter(silo::dsl::time_deleted.is_null()) + .filter(silo::dsl::id.eq(id)) + .filter(silo::dsl::rcgen.eq(rcgen)) + .set(silo::dsl::time_deleted.eq(now)) + .execute_async(&conn) + .await + .map_err(|e| { + public_error_from_diesel_pool( + PoolError::from(e), + ErrorHandler::NotFoundByResource(authz_silo), + ) + })?; + + if updated_rows == 0 { + return Err(TxnError::CustomError(Error::InvalidRequest { + message: "silo deletion failed due to concurrent modification" + .to_string(), + })); + } + + info!(opctx.log, "deleted silo {}", id); + + self.virtual_provisioning_collection_delete_on_connection( + &conn, + id, + ).await?; - info!(opctx.log, "deleted silo {}", id); + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError(e) => e, + TxnError::Pool(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } + })?; // TODO-correctness This needs to happen in a saga or some other // mechanism that ensures it happens even if we crash at this point. 
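
The organization and silo changes above (and the project change in a later commit in this series) all migrate in the same way: the conditional soft-delete and the `virtual_provisioning_collection` cleanup move into a single `transaction_async` closure, so a crash between the two steps can no longer leave a stale provisioning row behind. The skeleton of the pattern, assuming the existing `TransactionError` plumbing, with the resource-specific UPDATE elided:

    type TxnError = TransactionError<Error>;
    self.pool_authorized(opctx)
        .await?
        .transaction_async(|conn| async move {
            // 1) Conditionally soft-delete the resource row, guarded by
            //    `rcgen` so a concurrent child insertion makes the UPDATE
            //    match zero rows instead of orphaning the child.
            // 2) Remove the provisioning collection on the same connection;
            //    both effects commit together or not at all.
            self.virtual_provisioning_collection_delete_on_connection(
                &conn, id,
            )
            .await?;
            Ok(())
        })
        .await
        .map_err(|e| match e {
            TxnError::CustomError(e) => e,
            TxnError::Pool(e) => {
                public_error_from_diesel_pool(e, ErrorHandler::Server)
            }
        })
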
diff --git a/nexus/src/db/datastore/virtual_provisioning_collection.rs b/nexus/src/db/datastore/virtual_provisioning_collection.rs index 64dc3f85156..365d7d7a57f 100644 --- a/nexus/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/src/db/datastore/virtual_provisioning_collection.rs @@ -221,15 +221,44 @@ impl DataStore { opctx: &OpContext, id: Uuid, ) -> DeleteResult { + let pool = self.pool_authorized(opctx).await?; + self.virtual_provisioning_collection_delete_on_connection(pool, id) + .await + } + + /// Delete a [`VirtualProvisioningCollection`] object. + pub(crate) async fn virtual_provisioning_collection_delete_on_connection< + ConnErr, + >( + &self, + conn: &(impl async_bb8_diesel::AsyncConnection + + Sync), + id: Uuid, + ) -> DeleteResult + where + ConnErr: From + Send + 'static, + PoolError: From, + { use db::schema::virtual_provisioning_collection::dsl; - diesel::delete(dsl::virtual_provisioning_collection) + // NOTE: We don't really need to extract the value we're deleting from + // the DB, but by doing so, we can validate that we haven't + // miscalculated our usage accounting. + let collection = diesel::delete(dsl::virtual_provisioning_collection) .filter(dsl::id.eq(id)) - .execute_async(self.pool_authorized(opctx).await?) + .returning(VirtualProvisioningCollection::as_select()) + .get_result_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel_pool( + PoolError::from(e), + ErrorHandler::Server, + ) })?; + assert!( + collection.is_empty(), + "Collection deleted while non-empty: {collection:?}" + ); Ok(()) } From 999585cbeaaded50b8adbf96d7c0481a2aa8fb5d Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 27 Dec 2022 14:38:21 -0500 Subject: [PATCH 47/80] Remove fleet from views --- nexus/types/src/external_api/views.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 889bc0db7c0..9dd32206d4c 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -289,15 +289,6 @@ pub struct ExternalIp { pub kind: IpKind, } -// FLEET - -/// Client view of a [`Fleet`] -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct Fleet { - #[serde(flatten)] - pub identity: AssetIdentityMetadata, -} - // RACKS /// Client view of an [`Rack`] From 109172dd5cb1d342c8a303620890eebfe023c4f5 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 6 Jan 2023 01:11:15 -0500 Subject: [PATCH 48/80] Add resource accounting to saga idempotency/unwind tests --- nexus/src/app/sagas/disk_create.rs | 62 ++++++++++++++++++- nexus/src/app/sagas/instance_create.rs | 85 +++++++++++++++++++++++++- nexus/src/app/sagas/project_create.rs | 39 +++++++++++- nexus/src/db/mod.rs | 2 + nexus/src/db/pool.rs | 1 + 5 files changed, 185 insertions(+), 4 deletions(-) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index c6b06baba87..f87658ea2e0 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -646,7 +646,10 @@ pub(crate) mod test { app::sagas::disk_create::SagaDiskCreate, authn::saga::Serialized, context::OpContext, db::datastore::DataStore, external_api::params, }; - use async_bb8_diesel::{AsyncRunQueryDsl, OptionalExtension}; + use async_bb8_diesel::{ + AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + OptionalExtension, + }; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use 
dropshot::test_util::ClientTestContext; use nexus_test_utils::resource_helpers::create_ip_pool; @@ -750,6 +753,56 @@ pub(crate) mod test { .is_none() } + async fn no_virtual_provisioning_resource_records_exist( + datastore: &DataStore, + ) -> bool { + use crate::db::model::VirtualProvisioningResource; + use crate::db::schema::virtual_provisioning_resource::dsl; + + dsl::virtual_provisioning_resource + .select(VirtualProvisioningResource::as_select()) + .first_async::( + datastore.pool_for_tests().await.unwrap(), + ) + .await + .optional() + .unwrap() + .is_none() + } + + async fn no_virtual_provisioning_collection_records_using_storage( + datastore: &DataStore, + ) -> bool { + use crate::db::model::VirtualProvisioningCollection; + use crate::db::schema::virtual_provisioning_collection::dsl; + + datastore + .pool_for_tests() + .await + .unwrap() + .transaction_async(|conn| async move { + conn.batch_execute_async( + "set disallow_full_table_scans = off;\ + set large_full_scan_rows = 1000;", + ) + .await + .unwrap(); + Ok::<_, crate::db::TransactionError<()>>( + dsl::virtual_provisioning_collection + .filter(dsl::virtual_disk_bytes_provisioned.ne(0)) + .select(VirtualProvisioningCollection::as_select()) + .get_results_async::( + &conn, + ) + .await + .unwrap() + .is_empty(), + ) + }) + .await + .unwrap() + } + async fn no_region_allocations_exist( datastore: &DataStore, test: &DiskTest, @@ -794,6 +847,13 @@ pub(crate) mod test { assert!(no_disk_records_exist(datastore).await); assert!(no_volume_records_exist(datastore).await); + assert!( + no_virtual_provisioning_resource_records_exist(datastore).await + ); + assert!( + no_virtual_provisioning_collection_records_using_storage(datastore) + .await + ); assert!(no_region_allocations_exist(datastore, &test).await); assert!(no_regions_ensured(&sled_agent, &test).await); } diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 2d544ce39af..3bef048367c 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1030,8 +1030,13 @@ pub mod test { authn::saga::Serialized, context::OpContext, db::datastore::DataStore, external_api::params, }; - use async_bb8_diesel::{AsyncRunQueryDsl, OptionalExtension}; - use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; + use async_bb8_diesel::{ + AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + OptionalExtension, + }; + use diesel::{ + BoolExpressionMethods, ExpressionMethods, QueryDsl, SelectableHelper, + }; use dropshot::test_util::ClientTestContext; use nexus_test_utils::resource_helpers::create_disk; use nexus_test_utils::resource_helpers::create_organization; @@ -1166,6 +1171,73 @@ pub mod test { .is_none() } + async fn no_virtual_provisioning_resource_records_exist( + datastore: &DataStore, + ) -> bool { + use crate::db::model::VirtualProvisioningResource; + use crate::db::schema::virtual_provisioning_resource::dsl; + + datastore.pool_for_tests() + .await + .unwrap() + .transaction_async(|conn| async move { + conn + .batch_execute_async( + "set disallow_full_table_scans = off;\ + set large_full_scan_rows = 1000;" + ) + .await + .unwrap(); + + Ok::<_, crate::db::TransactionError<()>>( + dsl::virtual_provisioning_resource + .filter(dsl::resource_type.eq(crate::db::model::ResourceTypeProvisioned::Instance.to_string())) + .select(VirtualProvisioningResource::as_select()) + .get_results_async::(&conn) + .await + .unwrap() + .is_empty() + ) + }).await.unwrap() + } + + async fn 
no_virtual_provisioning_collection_records_using_instances( + datastore: &DataStore, + ) -> bool { + use crate::db::model::VirtualProvisioningCollection; + use crate::db::schema::virtual_provisioning_collection::dsl; + + datastore + .pool_for_tests() + .await + .unwrap() + .transaction_async(|conn| async move { + conn.batch_execute_async( + "set disallow_full_table_scans = off;\ + set large_full_scan_rows = 1000;", + ) + .await + .unwrap(); + Ok::<_, crate::db::TransactionError<()>>( + dsl::virtual_provisioning_collection + .filter( + dsl::cpus_provisioned + .ne(0) + .or(dsl::ram_provisioned.ne(0)), + ) + .select(VirtualProvisioningCollection::as_select()) + .get_results_async::( + &conn, + ) + .await + .unwrap() + .is_empty(), + ) + }) + .await + .unwrap() + } + async fn disk_is_detached(datastore: &DataStore) -> bool { use crate::db::model::Disk; use crate::db::schema::disk::dsl; @@ -1197,6 +1269,15 @@ pub mod test { assert!(no_instance_records_exist(datastore).await); assert!(no_network_interface_records_exist(datastore).await); assert!(no_external_ip_records_exist(datastore).await); + assert!( + no_virtual_provisioning_resource_records_exist(datastore).await + ); + assert!( + no_virtual_provisioning_collection_records_using_instances( + datastore + ) + .await + ); assert!(disk_is_detached(datastore).await); assert!(no_instances_or_disks_on_sled(&sled_agent).await); } diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index 22c6cc7a907..258447b8ded 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -151,7 +151,10 @@ mod test { authz, context::OpContext, db::datastore::DataStore, external_api::params, }; - use async_bb8_diesel::{AsyncRunQueryDsl, OptionalExtension}; + use async_bb8_diesel::{ + AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + OptionalExtension, + }; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use dropshot::test_util::ClientTestContext; use nexus_test_utils::resource_helpers::create_organization; @@ -216,6 +219,10 @@ mod test { async fn verify_clean_slate(datastore: &DataStore) { assert!(no_projects_exist(datastore).await); + assert!( + no_virtual_provisioning_collection_records_for_projects(datastore) + .await + ); crate::app::sagas::vpc_create::test::verify_clean_slate(datastore) .await; } @@ -237,6 +244,36 @@ mod test { .is_none() } + async fn no_virtual_provisioning_collection_records_for_projects( + datastore: &DataStore, + ) -> bool { + use crate::db::model::VirtualProvisioningCollection; + use crate::db::schema::virtual_provisioning_collection::dsl; + + datastore.pool_for_tests() + .await + .unwrap() + .transaction_async(|conn| async move { + conn + .batch_execute_async( + "set disallow_full_table_scans = off;\ + set large_full_scan_rows = 1000;" + ) + .await + .unwrap(); + Ok::<_, crate::db::TransactionError<()>>( + dsl::virtual_provisioning_collection + .filter(dsl::collection_type.eq(crate::db::model::CollectionTypeProvisioned::Project.to_string())) + + .select(VirtualProvisioningCollection::as_select()) + .get_results_async::(&conn) + .await + .unwrap() + .is_empty() + ) + }).await.unwrap() + } + #[nexus_test(server = crate::Server)] async fn test_saga_basic_usage_succeeds( cptestctx: &ControlPlaneTestContext, diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs index 26eee7eec0b..3ba4c5f7360 100644 --- a/nexus/src/db/mod.rs +++ b/nexus/src/db/mod.rs @@ -37,6 +37,8 @@ pub use nexus_db_model as model; use nexus_db_model::saga_types; pub use 
nexus_db_model::schema; +#[cfg(test)] +pub use crate::db::error::TransactionError; pub use config::Config; pub use datastore::DataStore; pub use pool::Pool; diff --git a/nexus/src/db/pool.rs b/nexus/src/db/pool.rs index 335b5bafb73..55231c18f56 100644 --- a/nexus/src/db/pool.rs +++ b/nexus/src/db/pool.rs @@ -53,6 +53,7 @@ impl Pool { Pool { pool } } + #[cfg(test)] pub fn new_failfast(db_config: &DbConfig) -> Self { let manager = ConnectionManager::::new(&db_config.url.url()); From 2cada78db2d3ea794daefeaa9f63b2eee3dfaa0c Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 6 Jan 2023 12:47:53 -0500 Subject: [PATCH 49/80] cleanup on project delete --- nexus/src/db/datastore/project.rs | 62 ++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs index 3e7d1832cbe..707edaa4e1b 100644 --- a/nexus/src/db/datastore/project.rs +++ b/nexus/src/db/datastore/project.rs @@ -22,7 +22,7 @@ use crate::db::model::Project; use crate::db::model::ProjectUpdate; use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, PoolError}; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::CreateResult; @@ -185,28 +185,48 @@ impl DataStore { use db::schema::project::dsl; - let now = Utc::now(); - let updated_rows = diesel::update(dsl::project) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_project.id())) - .filter(dsl::rcgen.eq(db_project.rcgen)) - .set(dsl::time_deleted.eq(now)) - .returning(Project::as_returning()) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_project), + type TxnError = TransactionError; + self.pool_authorized(opctx) + .await? 
+ .transaction_async(|conn| async move { + let now = Utc::now(); + let updated_rows = diesel::update(dsl::project) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(authz_project.id())) + .filter(dsl::rcgen.eq(db_project.rcgen)) + .set(dsl::time_deleted.eq(now)) + .returning(Project::as_returning()) + .execute_async(&conn) + .await + .map_err(|e| { + public_error_from_diesel_pool( + PoolError::from(e), + ErrorHandler::NotFoundByResource(authz_project), + ) + })?; + + if updated_rows == 0 { + return Err(TxnError::CustomError(Error::InvalidRequest { + message: + "deletion failed due to concurrent modification" + .to_string(), + })); + } + + self.virtual_provisioning_collection_delete_on_connection( + &conn, + db_project.id(), ) + .await?; + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError(e) => e, + TxnError::Pool(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } })?; - - if updated_rows == 0 { - return Err(Error::InvalidRequest { - message: "deletion failed due to concurrent modification" - .to_string(), - }); - } Ok(()) } From fbc1fe470bdcdd8e3fd0146bdaa9b6f256c6d709 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 6 Jan 2023 14:24:50 -0500 Subject: [PATCH 50/80] cleanup tests --- nexus/src/app/sagas/disk_create.rs | 9 +++------ nexus/src/app/sagas/instance_create.rs | 16 ++++------------ nexus/src/app/sagas/project_create.rs | 5 +---- nexus/src/db/mod.rs | 2 ++ nexus/src/db/pool.rs | 11 ++++++----- 5 files changed, 16 insertions(+), 27 deletions(-) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 949f144d2ca..749bb425ef0 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -785,12 +785,9 @@ pub(crate) mod test { .await .unwrap() .transaction_async(|conn| async move { - conn.batch_execute_async( - "set disallow_full_table_scans = off;\ - set large_full_scan_rows = 1000;", - ) - .await - .unwrap(); + conn.batch_execute_async(crate::db::ALLOW_FULL_TABLE_SCAN_SQL) + .await + .unwrap(); Ok::<_, crate::db::TransactionError<()>>( dsl::virtual_provisioning_collection .filter(dsl::virtual_disk_bytes_provisioned.ne(0)) diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 3bef048367c..f390e0f0eb7 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -776,7 +776,6 @@ pub(super) async fn allocate_sled_ipv6( .map_err(ActionError::action_failed) } -// TODO: Not yet idempotent async fn sic_account_resources( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -805,7 +804,6 @@ async fn sic_account_resources( Ok(()) } -// TODO: Not yet idempotent async fn sic_account_resources_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { @@ -1182,10 +1180,7 @@ pub mod test { .unwrap() .transaction_async(|conn| async move { conn - .batch_execute_async( - "set disallow_full_table_scans = off;\ - set large_full_scan_rows = 1000;" - ) + .batch_execute_async(crate::db::ALLOW_FULL_TABLE_SCAN_SQL) .await .unwrap(); @@ -1212,12 +1207,9 @@ pub mod test { .await .unwrap() .transaction_async(|conn| async move { - conn.batch_execute_async( - "set disallow_full_table_scans = off;\ - set large_full_scan_rows = 1000;", - ) - .await - .unwrap(); + conn.batch_execute_async(crate::db::ALLOW_FULL_TABLE_SCAN_SQL) + .await + .unwrap(); Ok::<_, crate::db::TransactionError<()>>( dsl::virtual_provisioning_collection .filter( diff --git a/nexus/src/app/sagas/project_create.rs 
b/nexus/src/app/sagas/project_create.rs
index 258447b8ded..14a13bca44f 100644
--- a/nexus/src/app/sagas/project_create.rs
+++ b/nexus/src/app/sagas/project_create.rs
@@ -255,10 +255,7 @@ mod test {
             .unwrap()
             .transaction_async(|conn| async move {
                 conn
-                    .batch_execute_async(
-                        "set disallow_full_table_scans = off;\
-                        set large_full_scan_rows = 1000;"
-                    )
+                    .batch_execute_async(crate::db::ALLOW_FULL_TABLE_SCAN_SQL)
                     .await
                     .unwrap();
                 Ok::<_, crate::db::TransactionError<()>>(
diff --git a/nexus/src/db/mod.rs b/nexus/src/db/mod.rs
index 3ba4c5f7360..d76d5dac3d7 100644
--- a/nexus/src/db/mod.rs
+++ b/nexus/src/db/mod.rs
@@ -42,6 +42,8 @@ pub use crate::db::error::TransactionError;
 pub use config::Config;
 pub use datastore::DataStore;
 pub use pool::Pool;
+#[cfg(test)]
+pub use pool::ALLOW_FULL_TABLE_SCAN_SQL;
 pub use saga_recovery::{recover, RecoveryTask};
 pub use saga_types::SecId;
 pub use sec_store::CockroachDbSecStore;
diff --git a/nexus/src/db/pool.rs b/nexus/src/db/pool.rs
index 55231c18f56..75e65b3bf67 100644
--- a/nexus/src/db/pool.rs
+++ b/nexus/src/db/pool.rs
@@ -70,6 +70,11 @@ impl Pool {
     }
 }
 
+const DISALLOW_FULL_TABLE_SCAN_SQL: &str =
+    "set disallow_full_table_scans = on; set large_full_scan_rows = 0;";
+pub const ALLOW_FULL_TABLE_SCAN_SQL: &str =
+    "set disallow_full_table_scans = off; set large_full_scan_rows = 1000;";
+
 #[derive(Debug)]
 struct DisallowFullTableScans {}
 #[async_trait]
 impl CustomizeConnection<Connection<DbConnection>, ConnectionError>
     for DisallowFullTableScans
 {
     async fn on_acquire(
         &self,
         conn: &mut Connection<DbConnection>,
     ) -> Result<(), ConnectionError> {
-        conn.batch_execute_async(
-            "set disallow_full_table_scans = on;\
-            set large_full_scan_rows = 0;",
-        )
-        .await
+        conn.batch_execute_async(DISALLOW_FULL_TABLE_SCAN_SQL).await
     }
 }

From dd326e8260cb2785c0e3fefd16bb6cc95292d250 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Fri, 6 Jan 2023 14:25:35 -0500
Subject: [PATCH 51/80] testonly

---
 nexus/src/db/pool.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nexus/src/db/pool.rs b/nexus/src/db/pool.rs
index 75e65b3bf67..fb44e9be200 100644
--- a/nexus/src/db/pool.rs
+++ b/nexus/src/db/pool.rs
@@ -72,6 +72,7 @@ impl Pool {
 
 const DISALLOW_FULL_TABLE_SCAN_SQL: &str =
     "set disallow_full_table_scans = on; set large_full_scan_rows = 0;";
+#[cfg(test)]
 pub const ALLOW_FULL_TABLE_SCAN_SQL: &str =
     "set disallow_full_table_scans = off; set large_full_scan_rows = 1000;";
 
From 2e1c25ab0f29e6220a68ceacfc98b44a50176549 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Mon, 9 Jan 2023 00:41:26 -0500
Subject: [PATCH 52/80] Add more tests, oximeter test API

---
 nexus/src/app/provisioning.rs              | 119 +++++++++++++++++++++
 nexus/tests/integration_tests/disks.rs     |  27 +----
 nexus/tests/integration_tests/instances.rs |  89 +++++++++++++--
 nexus/tests/integration_tests/metrics.rs   |  68 ++++++++++++
 nexus/tests/integration_tests/mod.rs       |   1 +
 oximeter/collector/src/lib.rs              | 115 ++++++++++++--------
 6 files changed, 349 insertions(+), 70 deletions(-)
 create mode 100644 nexus/src/app/provisioning.rs
 create mode 100644 nexus/tests/integration_tests/metrics.rs

diff --git a/nexus/src/app/provisioning.rs b/nexus/src/app/provisioning.rs
new file mode 100644
index 00000000000..5c001baf187
--- /dev/null
+++ b/nexus/src/app/provisioning.rs
@@ -0,0 +1,119 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Types to export metrics about provisioning information.
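
The design point of the new module beginning above: Nexus itself acts as an oximeter producer, appending samples to a shared buffer whenever the database records change, and handing the buffer over when the collector scrapes it. That buffering pattern, reduced to a sketch (`BufferingProducer` is an illustrative name, not the type the module defines):

    use oximeter::types::Sample;
    use oximeter::MetricsError;
    use std::sync::{Arc, Mutex};

    #[derive(Clone, Debug, Default)]
    struct BufferingProducer {
        samples: Arc<Mutex<Vec<Sample>>>,
    }

    impl oximeter::Producer for BufferingProducer {
        fn produce(
            &mut self,
        ) -> Result<Box<dyn Iterator<Item = Sample> + 'static>, MetricsError>
        {
            // Swap the buffer out so each sample is reported exactly once.
            let samples = std::mem::take(&mut *self.samples.lock().unwrap());
            Ok(Box::new(samples.into_iter()))
        }
    }
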
+ +use crate::db::model::VirtualProvisioningCollection; +use oximeter::{types::Sample, Metric, MetricsError, Target}; +use std::sync::{Arc, Mutex}; +use uuid::Uuid; + +/// Describes a collection that holds other resources. +/// +/// Example targets might include projects, organizations, silos or fleets. +#[derive(Debug, Clone, Target)] +struct CollectionTarget { + id: Uuid, +} + +#[derive(Debug, Clone, Metric)] +struct VirtualDiskSpaceProvisioned { + #[datum] + bytes_used: i64, +} + +#[derive(Debug, Clone, Metric)] +struct CpusProvisioned { + #[datum] + cpus: i64, +} + +#[derive(Debug, Clone, Metric)] +struct RamProvisioned { + #[datum] + bytes: i64, +} + +/// An oximeter producer for reporting [`VirtualProvisioningCollection`] information to Clickhouse. +/// +/// This producer collects samples whenever the database record for a collection +/// is created or updated. This implies that the CockroachDB record is always +/// kept up-to-date, and the Clickhouse historical records are batched and +/// transmitted once they are collected (as is the norm for Clickhouse metrics). +#[derive(Debug, Default, Clone)] +pub(crate) struct Producer { + samples: Arc>>, +} + +impl Producer { + pub fn new() -> Self { + Self { samples: Arc::new(Mutex::new(vec![])) } + } + + pub fn append_all_metrics( + &self, + provisions: &Vec, + ) { + self.append_cpu_metrics(&provisions); + self.append_disk_metrics(&provisions); + } + + pub fn append_disk_metrics( + &self, + provisions: &Vec, + ) { + let new_samples = provisions + .iter() + .map(|provision| { + Sample::new( + &CollectionTarget { id: provision.id }, + &VirtualDiskSpaceProvisioned { + bytes_used: provision.virtual_disk_bytes_provisioned, + }, + ) + }) + .collect::>(); + + self.append(new_samples); + } + + pub fn append_cpu_metrics( + &self, + provisions: &Vec, + ) { + let new_samples = provisions + .iter() + .map(|provision| { + Sample::new( + &CollectionTarget { id: provision.id }, + &CpusProvisioned { cpus: provision.cpus_provisioned }, + ) + }) + .chain(provisions.iter().map(|provision| { + Sample::new( + &CollectionTarget { id: provision.id }, + &RamProvisioned { bytes: provision.ram_provisioned }, + ) + })) + .collect::>(); + + self.append(new_samples); + } + + fn append(&self, mut new_samples: Vec) { + let mut pending_samples = self.samples.lock().unwrap(); + pending_samples.append(&mut new_samples); + } +} + +impl oximeter::Producer for Producer { + fn produce( + &mut self, + ) -> Result + 'static>, MetricsError> { + let samples = + std::mem::replace(&mut *self.samples.lock().unwrap(), vec![]); + Ok(Box::new(samples.into_iter())) + } +} + diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index f7a1eca5894..e279ba34b09 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -4,6 +4,8 @@ //! 
Tests basic disk support in the API +use super::metrics::query_for_metrics_until_they_exist; + use chrono::Utc; use crucible_agent_client::types::State as RegionState; use dropshot::test_util::ClientTestContext; @@ -31,7 +33,6 @@ use omicron_common::api::external::DiskState; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::Name; -use omicron_common::backoff; use omicron_nexus::db::fixed_data::{silo::SILO_ID, FLEET_ID}; use omicron_nexus::TestInterfaces as _; use omicron_nexus::{context::OpContext, external_api::params, Nexus}; @@ -1366,33 +1367,13 @@ async fn create_instance_with_disk(client: &ClientTestContext) { const ALL_METRICS: [&'static str; 6] = ["activated", "read", "write", "read_bytes", "write_bytes", "flush"]; -async fn query_for_metrics_until_they_exist( - client: &ClientTestContext, - path: &str, -) -> ResultsPage { - backoff::retry_notify( - backoff::retry_policy_local(), - || async { - let measurements: ResultsPage = - objects_list_page_authz(client, path).await; - - if measurements.items.is_empty() { - return Err(backoff::BackoffError::transient("No metrics yet")); - } - Ok(measurements) - }, - |_, _| {}, - ) - .await - .expect("Failed to query for measurements") -} - #[nexus_test] async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { // Normally, Nexus is not registered as a producer for tests. // Turn this bit on so we can also test some metrics from Nexus itself. cptestctx.server.register_as_producer().await; + let oximeter = &cptestctx.oximeter; let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; let project_id = create_org_and_project(client).await; @@ -1412,12 +1393,14 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { // // Observe that no metrics exist yet; no "upstairs" should have been // instantiated on a sled. + oximeter.force_collect().await; let measurements: ResultsPage = objects_list_page_authz(client, &metric_url("read")).await; assert!(measurements.items.is_empty()); // Create an instance, attach the disk to it. create_instance_with_disk(client).await; + oximeter.force_collect().await; for metric in &ALL_METRICS { let measurements = diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index f0ea7d078e2..ad5b96a4800 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -4,6 +4,9 @@ //! Tests basic instance support in the API +use super::metrics::query_for_metrics_until_it_contains; + +use chrono::Utc; use http::method::Method; use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; @@ -508,14 +511,20 @@ async fn test_instances_create_reboot_halt( #[nexus_test] async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { + // Normally, Nexus is not registered as a producer for tests. + // Turn this bit on so we can also test some metrics from Nexus itself. + cptestctx.server.register_as_producer().await; + let client = &cptestctx.external_client; + let oximeter = &cptestctx.oximeter; let apictx = &cptestctx.server.apictx; let nexus = &apictx.nexus; let datastore = nexus.datastore(); // Create an IP pool and project that we'll use for testing. 
populate_ip_pool(&client, "default", None).await; - create_organization(&client, ORGANIZATION_NAME).await; + let organization_id = + create_organization(&client, ORGANIZATION_NAME).await.identity.id; let url_instances = format!( "/organizations/{}/projects/{}/instances", ORGANIZATION_NAME, PROJECT_NAME @@ -525,6 +534,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .identity .id; + // Query the view of these metrics stored within CRDB let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); let virtual_provisioning_collection = datastore @@ -534,6 +544,39 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { assert_eq!(virtual_provisioning_collection.cpus_provisioned, 0); assert_eq!(virtual_provisioning_collection.ram_provisioned, 0); + // Query the view of these metrics stored within Clickhouse + let metric_url = |metric_type: &str, id: Uuid| { + format!( + "/system/metrics/{metric_type}?start_time={:?}&end_time={:?}&id={id}", + Utc::now() - chrono::Duration::seconds(10), + Utc::now() + chrono::Duration::seconds(10), + ) + }; + oximeter.force_collect().await; + for id in vec![organization_id, project_id] { + query_for_metrics_until_it_contains( + client, + &metric_url("virtual_disk_space_provisioned", id), + 0, + 0, + ) + .await; + query_for_metrics_until_it_contains( + client, + &metric_url("cpus_provisioned", id), + 0, + 0, + ) + .await; + query_for_metrics_until_it_contains( + client, + &metric_url("ram_provisioned", id), + 0, + 0, + ) + .await; + } + // Create an instance. let instance_url = format!("{}/just-rainsticks", url_instances); create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, "just-rainsticks") @@ -565,11 +608,28 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!(virtual_provisioning_collection.cpus_provisioned, 4); - assert_eq!( - virtual_provisioning_collection.ram_provisioned, - i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(), - ); + let expected_cpus = 4; + let expected_ram = + i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(); + assert_eq!(virtual_provisioning_collection.cpus_provisioned, expected_cpus); + assert_eq!(virtual_provisioning_collection.ram_provisioned, expected_ram,); + oximeter.force_collect().await; + for id in vec![organization_id, project_id] { + query_for_metrics_until_it_contains( + client, + &metric_url("cpus_provisioned", id), + 1, + expected_cpus, + ) + .await; + query_for_metrics_until_it_contains( + client, + &metric_url("ram_provisioned", id), + 1, + expected_ram, + ) + .await; + } // Stop the instance NexusRequest::object_delete(client, &instance_url) @@ -584,6 +644,23 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(virtual_provisioning_collection.cpus_provisioned, 0); assert_eq!(virtual_provisioning_collection.ram_provisioned, 0); + oximeter.force_collect().await; + for id in vec![organization_id, project_id] { + query_for_metrics_until_it_contains( + client, + &metric_url("cpus_provisioned", id), + 2, + 0, + ) + .await; + query_for_metrics_until_it_contains( + client, + &metric_url("ram_provisioned", id), + 2, + 0, + ) + .await; + } } #[nexus_test] diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs new file mode 100644 index 00000000000..a196450ccb6 --- /dev/null +++ b/nexus/tests/integration_tests/metrics.rs @@ -0,0 +1,68 @@ 
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use dropshot::test_util::ClientTestContext;
+use dropshot::ResultsPage;
+use nexus_test_utils::resource_helpers::objects_list_page_authz;
+use omicron_common::backoff;
+use oximeter::types::Datum;
+use oximeter::types::Measurement;
+
+pub async fn query_for_metrics_until_they_exist(
+    client: &ClientTestContext,
+    path: &str,
+) -> ResultsPage<Measurement> {
+    backoff::retry_notify(
+        backoff::retry_policy_local(),
+        || async {
+            let measurements: ResultsPage<Measurement> =
+                objects_list_page_authz(client, path).await;
+
+            if measurements.items.is_empty() {
+                return Err(backoff::BackoffError::transient("No metrics yet"));
+            }
+            Ok(measurements)
+        },
+        |error, _| {
+            eprintln!("Failed to query {path}: {error}");
+        },
+    )
+    .await
+    .expect("Failed to query for measurements")
+}
+
+pub async fn query_for_metrics_until_it_contains(
+    client: &ClientTestContext,
+    path: &str,
+    index: usize,
+    value: i64,
+) -> ResultsPage<Measurement> {
+    backoff::retry_notify(
+        backoff::retry_policy_local(),
+        || async {
+            let measurements: ResultsPage<Measurement> =
+                objects_list_page_authz(client, path).await;
+
+            if measurements.items.len() <= index {
+                return Err(backoff::BackoffError::transient(format!(
+                    "Not enough metrics yet (only seen: {:?})",
+                    measurements.items
+                )));
+            }
+
+            let item = &measurements.items[index];
+            let datum = match item.datum() {
+                Datum::I64(c) => c,
+                _ => panic!("Unexpected datum type {:?}", item.datum()),
+            };
+            assert_eq!(*datum, value, "Datum exists, but has the wrong value");
+            Ok(measurements)
+        },
+        |error, _| {
+            eprintln!("Failed to query {path}: {error}");
+        },
+    )
+    .await
+    .expect("Failed to query for measurements")
+}
diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs
index 454114d53e0..0533841b305 100644
--- a/nexus/tests/integration_tests/mod.rs
+++ b/nexus/tests/integration_tests/mod.rs
@@ -13,6 +13,7 @@ mod disks;
 mod images;
 mod instances;
 mod ip_pools;
+mod metrics;
 mod organizations;
 mod oximeter;
 mod password_login;
diff --git a/oximeter/collector/src/lib.rs b/oximeter/collector/src/lib.rs
index a6f3b551b54..727b81b1e0d 100644
--- a/oximeter/collector/src/lib.rs
+++ b/oximeter/collector/src/lib.rs
@@ -51,7 +51,6 @@ pub enum Error {
 #[derive(Debug, Clone)]
 enum CollectionMessage {
     // Explicit request that the task collect data from its producer
-    #[allow(dead_code)]
     Collect,
     // Request that the task update its interval and the socket address on which it collects data
     // from its producer.
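
The first hunk above can drop `#[allow(dead_code)]` because `CollectionMessage::Collect` finally has a sender; the second hunk below hoists the HTTP fetch into a standalone `perform_collection` so the timer tick and an explicit `Collect` message share one code path. The resulting control loop, condensed (the `Update` arm and logging are elided):

    loop {
        tokio::select! {
            message = inbox.recv() => match message {
                Some(CollectionMessage::Collect) => {
                    perform_collection(&log, &client, &producer, &outbox).await;
                }
                Some(CollectionMessage::Shutdown) | None => return,
                // Update(..) re-arms the interval with the new configuration.
                _ => {}
            },
            _ = collection_timer.tick() => {
                perform_collection(&log, &client, &producer, &outbox).await;
            }
        }
    }
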
@@ -61,6 +60,59 @@ enum CollectionMessage { Shutdown, } +async fn perform_collection( + log: &Logger, + client: &reqwest::Client, + producer: &ProducerEndpoint, + outbox: &mpsc::Sender, +) { + info!(log, "collecting from producer"); + let res = client + .get(format!( + "http://{}{}", + producer.address, + producer.collection_route() + )) + .send() + .await; + match res { + Ok(res) => { + if res.status().is_success() { + match res.json::().await { + Ok(results) => { + debug!( + log, + "collected {} total results", + results.len(); + ); + outbox.send(results).await.unwrap(); + } + Err(e) => { + warn!( + log, + "failed to collect results from producer: {}", + e.to_string(); + ); + } + } + } else { + warn!( + log, + "failed to receive metric results from producer"; + "status_code" => res.status().as_u16(), + ); + } + } + Err(e) => { + warn!( + log, + "failed to send collection request to producer: {}", + e.to_string(); + ); + } + } +} + // Background task used to collect metrics from one producer on an interval. // // This function is started by the `OximeterAgent`, when a producer is registered. The task loops @@ -81,6 +133,7 @@ async fn collection_task( "starting oximeter collection task"; "interval" => ?producer.interval, ); + loop { tokio::select! { message = inbox.recv() => { @@ -91,9 +144,11 @@ async fn collection_task( } Some(CollectionMessage::Shutdown) => { debug!(log, "collection task received shutdown request"); + return; }, Some(CollectionMessage::Collect) => { - debug!(log, "collection task received request to collect"); + debug!(log, "collection task received explicit request to collect"); + perform_collection(&log, &client, &producer, &outbox).await; }, Some(CollectionMessage::Update(new_info)) => { producer = new_info; @@ -109,46 +164,7 @@ async fn collection_task( } } _ = collection_timer.tick() => { - info!(log, "collecting from producer"); - let res = client.get(format!("http://{}{}", producer.address, producer.collection_route())) - .send() - .await; - match res { - Ok(res) => { - if res.status().is_success() { - match res.json::().await { - Ok(results) => { - debug!( - log, - "collected {} total results", - results.len(); - ); - outbox.send(results).await.unwrap(); - }, - Err(e) => { - warn!( - log, - "failed to collect results from producer: {}", - e.to_string(); - ); - } - } - } else { - warn!( - log, - "failed to receive metric results from producer"; - "status_code" => res.status().as_u16(), - ); - } - }, - Err(e) => { - warn!( - log, - "failed to send collection request to producer: {}", - e.to_string(); - ); - } - } + perform_collection(&log, &client, &producer, &outbox).await; } } } @@ -351,6 +367,13 @@ impl OximeterAgent { } Ok(()) } + + pub async fn force_collection(&self) { + let collection_tasks = self.collection_tasks.lock().await; + for task in collection_tasks.iter() { + task.1.inbox.send(CollectionMessage::Collect).await.unwrap(); + } + } } /// Configuration used to initialize an oximeter server @@ -523,6 +546,14 @@ impl Oximeter { pub async fn close(self) -> Result<(), Error> { self.server.close().await.map_err(Error::Server) } + + /// Forces Oximeter to perform a collection immediately. + /// + /// This is particularly useful during tests, which would prefer to + /// avoid waiting until a collection interval completes. 
+ pub async fn force_collect(&self) { + self.server.app_private().force_collection().await + } } // Build the HTTP API internal to the control plane From 4db6b9ffe461407140af03d563a91c261f13120e Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Mon, 9 Jan 2023 00:57:10 -0500 Subject: [PATCH 53/80] fix disk_delete test parameters --- nexus/src/app/sagas/disk_delete.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index a0a2809094f..0c74112e109 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -156,7 +156,8 @@ async fn sdd_delete_volume( pub(crate) mod test { use crate::{ app::saga::create_saga_dag, app::sagas::disk_delete::Params, - app::sagas::disk_delete::SagaDiskDelete, context::OpContext, db, + app::sagas::disk_delete::SagaDiskDelete, authn::saga::Serialized, + context::OpContext, db, }; use dropshot::test_util::ClientTestContext; use nexus_test_utils::resource_helpers::create_ip_pool; @@ -217,11 +218,16 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - create_org_and_project(&client).await; + let project_id = create_org_and_project(&client).await; let disk_id = create_disk(&cptestctx).await; // Build the saga DAG with the provided test parameters - let params = Params { disk_id }; + let opctx = test_opctx(&cptestctx); + let params = Params { + serialized_authn: Serialized::for_opctx(&opctx), + project_id, + disk_id, + }; let dag = create_saga_dag::(params).unwrap(); let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); @@ -237,11 +243,16 @@ pub(crate) mod test { let client = &cptestctx.external_client; let nexus = &cptestctx.server.apictx.nexus; - create_org_and_project(&client).await; + let project_id = create_org_and_project(&client).await; let disk_id = create_disk(&cptestctx).await; // Build the saga DAG with the provided test parameters - let params = Params { disk_id }; + let opctx = test_opctx(&cptestctx); + let params = Params { + serialized_authn: Serialized::for_opctx(&opctx), + project_id, + disk_id, + }; let dag = create_saga_dag::(params).unwrap(); let runnable_saga = From 254c2ecbedf9ac88e956044df2b1eb56ad817ae4 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 10 Jan 2023 22:38:31 -0500 Subject: [PATCH 54/80] Derive display --- .../src/virtual_provisioning_collection.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/nexus/db-model/src/virtual_provisioning_collection.rs b/nexus/db-model/src/virtual_provisioning_collection.rs index def866d7a10..565ce8c80ae 100644 --- a/nexus/db-model/src/virtual_provisioning_collection.rs +++ b/nexus/db-model/src/virtual_provisioning_collection.rs @@ -3,9 +3,10 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use crate::schema::virtual_provisioning_collection; +use parse_display::Display; use uuid::Uuid; -#[derive(Debug)] +#[derive(Debug, Display)] pub enum CollectionTypeProvisioned { Project, Organization, @@ -13,19 +14,6 @@ pub enum CollectionTypeProvisioned { Fleet, } -impl std::fmt::Display for CollectionTypeProvisioned { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CollectionTypeProvisioned::Project => write!(f, "project"), - CollectionTypeProvisioned::Organization => { - write!(f, "organization") - } - CollectionTypeProvisioned::Silo => write!(f, "silo"), - CollectionTypeProvisioned::Fleet => write!(f, "fleet"), - } - } -} - /// Describes virtual_provisioning_collection for a collection #[derive(Clone, Selectable, Queryable, Insertable, Debug)] #[diesel(table_name = virtual_provisioning_collection)] From 76704e7fd7f9bcfefafd8de3d057cb09562d2770 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 10 Jan 2023 23:20:51 -0500 Subject: [PATCH 55/80] to ByteCount --- .../src/virtual_provisioning_collection.rs | 16 +++-- .../src/virtual_provisioning_resource.rs | 12 ++-- nexus/src/app/provisioning.rs | 6 +- nexus/src/app/sagas/disk_create.rs | 14 +--- nexus/src/app/sagas/disk_delete.rs | 17 +---- nexus/src/app/sagas/instance_create.rs | 19 ++---- nexus/src/app/sagas/instance_delete.rs | 10 +-- nexus/src/app/sagas/snapshot_create.rs | 14 +--- nexus/src/app/sagas/snapshot_delete.rs | 9 +-- .../virtual_provisioning_collection.rs | 23 ++++--- .../virtual_provisioning_collection_update.rs | 15 +++-- nexus/tests/integration_tests/disks.rs | 66 ++++++++++++------- nexus/tests/integration_tests/instances.rs | 13 ++-- nexus/tests/integration_tests/snapshots.rs | 24 +++---- 14 files changed, 113 insertions(+), 145 deletions(-) diff --git a/nexus/db-model/src/virtual_provisioning_collection.rs b/nexus/db-model/src/virtual_provisioning_collection.rs index 565ce8c80ae..88fa5e52d9c 100644 --- a/nexus/db-model/src/virtual_provisioning_collection.rs +++ b/nexus/db-model/src/virtual_provisioning_collection.rs @@ -3,6 +3,8 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use crate::schema::virtual_provisioning_collection; +use crate::ByteCount; +use omicron_common::api::external; use parse_display::Display; use uuid::Uuid; @@ -21,9 +23,9 @@ pub struct VirtualProvisioningCollection { pub id: Uuid, pub collection_type: String, - pub virtual_disk_bytes_provisioned: i64, + pub virtual_disk_bytes_provisioned: ByteCount, pub cpus_provisioned: i64, - pub ram_provisioned: i64, + pub ram_provisioned: ByteCount, } impl VirtualProvisioningCollection { @@ -31,15 +33,17 @@ impl VirtualProvisioningCollection { Self { id, collection_type: collection_type.to_string(), - virtual_disk_bytes_provisioned: 0, + virtual_disk_bytes_provisioned: ByteCount( + external::ByteCount::from(0), + ), cpus_provisioned: 0, - ram_provisioned: 0, + ram_provisioned: ByteCount(external::ByteCount::from(0)), } } pub fn is_empty(&self) -> bool { - self.virtual_disk_bytes_provisioned == 0 + self.virtual_disk_bytes_provisioned.to_bytes() == 0 && self.cpus_provisioned == 0 - && self.ram_provisioned == 0 + && self.ram_provisioned.to_bytes() == 0 } } diff --git a/nexus/db-model/src/virtual_provisioning_resource.rs b/nexus/db-model/src/virtual_provisioning_resource.rs index 9d5f7a8cba8..cb388710907 100644 --- a/nexus/db-model/src/virtual_provisioning_resource.rs +++ b/nexus/db-model/src/virtual_provisioning_resource.rs @@ -3,6 +3,8 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
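Stepping back to the `Display` derive in the previous patch: `parse_display`
renders a unit variant using its name verbatim unless a style is given, so
matching the old hand-written lowercase output takes a `style` attribute.
A minimal, self-contained sketch (the enum here is illustrative):

    use parse_display::Display;

    #[derive(Display)]
    #[display(style = "lowercase")]
    enum CollectionType {
        Project,
        Organization,
    }

    fn main() {
        // "project", matching what the removed manual impl produced.
        assert_eq!(CollectionType::Project.to_string(), "project");
    }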
use crate::schema::virtual_provisioning_resource; +use crate::ByteCount; +use omicron_common::api::external; use uuid::Uuid; #[derive(Debug)] @@ -29,9 +31,9 @@ pub struct VirtualProvisioningResource { pub id: Uuid, pub resource_type: String, - pub virtual_disk_bytes_provisioned: i64, + pub virtual_disk_bytes_provisioned: ByteCount, pub cpus_provisioned: i64, - pub ram_provisioned: i64, + pub ram_provisioned: ByteCount, } impl VirtualProvisioningResource { @@ -39,9 +41,11 @@ impl VirtualProvisioningResource { Self { id, resource_type: resource_type.to_string(), - virtual_disk_bytes_provisioned: 0, + virtual_disk_bytes_provisioned: ByteCount( + external::ByteCount::from(0), + ), cpus_provisioned: 0, - ram_provisioned: 0, + ram_provisioned: ByteCount(external::ByteCount::from(0)), } } } diff --git a/nexus/src/app/provisioning.rs b/nexus/src/app/provisioning.rs index 5c001baf187..a537706c7b4 100644 --- a/nexus/src/app/provisioning.rs +++ b/nexus/src/app/provisioning.rs @@ -20,19 +20,19 @@ struct CollectionTarget { #[derive(Debug, Clone, Metric)] struct VirtualDiskSpaceProvisioned { #[datum] - bytes_used: i64, + bytes_used: u64, } #[derive(Debug, Clone, Metric)] struct CpusProvisioned { #[datum] - cpus: i64, + cpus: u64, } #[derive(Debug, Clone, Metric)] struct RamProvisioned { #[datum] - bytes: i64, + bytes: u64, } /// An oximeter producer for reporting [`VirtualProvisioningCollection`] information to Clickhouse. diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 67c34938d9b..ce16eee8bdf 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -258,13 +258,7 @@ async fn sdc_account_space( &opctx, disk_created.id(), params.project_id, - i64::try_from(disk_created.size.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + disk_created.size, ) .await .map_err(ActionError::action_failed)?; @@ -285,11 +279,7 @@ async fn sdc_account_space_undo( &opctx, disk_created.id(), params.project_id, - -i64::try_from(disk_created.size.to_bytes()).map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - })?, + disk_created.size, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 0c74112e109..9f693ad7a45 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -9,7 +9,6 @@ use crate::app::sagas::declare_saga_actions; use crate::authn; use crate::context::OpContext; use crate::db; -use omicron_common::api::external::Error; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -96,13 +95,7 @@ async fn sdd_account_space( &opctx, deleted_disk.id(), params.project_id, - -i64::try_from(deleted_disk.size.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + deleted_disk.size, ) .await .map_err(ActionError::action_failed)?; @@ -123,13 +116,7 @@ async fn sdd_account_space_undo( &opctx, deleted_disk.id(), params.project_id, - i64::try_from(deleted_disk.size.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + deleted_disk.size, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs 
index f390e0f0eb7..afc71a2c9b8 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -12,6 +12,7 @@ use crate::app::{ use crate::context::OpContext; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; +use crate::db::model::ByteCount as DbByteCount; use crate::db::queries::network_interface::InsertError as InsertNicError; use crate::external_api::params; use crate::{authn, authz, db}; @@ -791,13 +792,7 @@ async fn sic_account_resources( instance_id, params.project_id, i64::from(params.create_params.ncpus.0), - i64::try_from(params.create_params.memory.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + DbByteCount(params.create_params.memory), ) .await .map_err(ActionError::action_failed)?; @@ -818,14 +813,8 @@ async fn sic_account_resources_undo( &opctx, instance_id, params.project_id, - -i64::from(params.create_params.ncpus.0), - -i64::try_from(params.create_params.memory.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + i64::from(params.create_params.ncpus.0), + DbByteCount(params.create_params.memory), ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 5f5b91b14ab..7a7b5da8376 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -136,14 +136,8 @@ async fn sid_account_resources( &opctx, params.instance.id(), params.instance.project_id, - -i64::from(params.instance.runtime_state.ncpus.0 .0), - -i64::try_from(params.instance.runtime_state.memory.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + i64::from(params.instance.runtime_state.ncpus.0 .0), + params.instance.runtime_state.memory, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 8381bd7f265..65a7cda00ac 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -524,13 +524,7 @@ async fn ssc_account_space( &opctx, snapshot_created.id(), params.project_id, - i64::try_from(snapshot_created.size.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + snapshot_created.size, ) .await .map_err(ActionError::action_failed)?; @@ -552,11 +546,7 @@ async fn ssc_account_space_undo( &opctx, snapshot_created.id(), params.project_id, - -i64::try_from(snapshot_created.size.to_bytes()).map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - })?, + snapshot_created.size, ) .await?; Ok(()) diff --git a/nexus/src/app/sagas/snapshot_delete.rs b/nexus/src/app/sagas/snapshot_delete.rs index c16d6995861..9ff8283ae6f 100644 --- a/nexus/src/app/sagas/snapshot_delete.rs +++ b/nexus/src/app/sagas/snapshot_delete.rs @@ -7,7 +7,6 @@ use crate::app::sagas; use crate::app::sagas::declare_saga_actions; use crate::context::OpContext; use crate::{authn, authz, db}; -use omicron_common::api::external::Error; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -133,13 +132,7 @@ async fn ssd_account_space( &opctx, params.authz_snapshot.id(), 
params.snapshot.project_id, - -i64::try_from(params.snapshot.size.to_bytes()) - .map_err(|e| { - Error::internal_error(&format!( - "updating resource provisioning: {e}" - )) - }) - .map_err(ActionError::action_failed)?, + params.snapshot.size, ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/db/datastore/virtual_provisioning_collection.rs b/nexus/src/db/datastore/virtual_provisioning_collection.rs index 365d7d7a57f..7a792a2a1a9 100644 --- a/nexus/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/src/db/datastore/virtual_provisioning_collection.rs @@ -9,6 +9,7 @@ use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; +use crate::db::model::ByteCount; use crate::db::model::VirtualProvisioningCollection; use crate::db::pool::DbConnection; use crate::db::queries::virtual_provisioning_collection_update::VirtualProvisioningCollectionUpdate; @@ -73,7 +74,9 @@ impl Producer { Sample::new( &CollectionTarget { id: provision.id }, &VirtualDiskSpaceProvisioned { - bytes_used: provision.virtual_disk_bytes_provisioned, + bytes_used: provision + .virtual_disk_bytes_provisioned + .into(), }, ) }) @@ -97,7 +100,7 @@ impl Producer { .chain(provisions.iter().map(|provision| { Sample::new( &CollectionTarget { id: provision.id }, - &RamProvisioned { bytes: provision.ram_provisioned }, + &RamProvisioned { bytes: provision.ram_provisioned.into() }, ) })) .collect::>(); @@ -275,7 +278,7 @@ impl DataStore { opctx: &OpContext, id: Uuid, project_id: Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, ) -> Result, Error> { self.virtual_provisioning_collection_insert_storage( opctx, @@ -292,7 +295,7 @@ impl DataStore { opctx: &OpContext, id: Uuid, project_id: Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, ) -> Result, Error> { self.virtual_provisioning_collection_insert_storage( opctx, @@ -310,7 +313,7 @@ impl DataStore { opctx: &OpContext, id: Uuid, project_id: Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, storage_type: StorageType, ) -> Result, Error> { let provisions = @@ -335,7 +338,7 @@ impl DataStore { opctx: &OpContext, id: Uuid, project_id: Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, ) -> Result, Error> { self.virtual_provisioning_collection_delete_storage( opctx, @@ -351,7 +354,7 @@ impl DataStore { opctx: &OpContext, id: Uuid, project_id: Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, ) -> Result, Error> { self.virtual_provisioning_collection_delete_storage( opctx, @@ -368,7 +371,7 @@ impl DataStore { opctx: &OpContext, id: Uuid, project_id: Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, ) -> Result, Error> { let provisions = VirtualProvisioningCollectionUpdate::new_delete_storage( @@ -393,7 +396,7 @@ impl DataStore { id: Uuid, project_id: Uuid, cpus_diff: i64, - ram_diff: i64, + ram_diff: ByteCount, ) -> Result, Error> { let provisions = VirtualProvisioningCollectionUpdate::new_insert_instance( @@ -416,7 +419,7 @@ impl DataStore { id: Uuid, project_id: Uuid, cpus_diff: i64, - ram_diff: i64, + ram_diff: ByteCount, ) -> Result, Error> { let provisions = VirtualProvisioningCollectionUpdate::new_delete_instance( diff --git a/nexus/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/src/db/queries/virtual_provisioning_collection_update.rs index 1aa6f0d16fe..13dd0352906 100644 --- a/nexus/src/db/queries/virtual_provisioning_collection_update.rs +++ b/nexus/src/db/queries/virtual_provisioning_collection_update.rs @@ -5,6 
+5,7 @@ //! Implementation of queries for updating resource provisioning info. use crate::db::alias::ExpressionAlias; +use crate::db::model::ByteCount; use crate::db::model::ResourceTypeProvisioned; use crate::db::model::VirtualProvisioningCollection; use crate::db::model::VirtualProvisioningResource; @@ -295,7 +296,7 @@ impl VirtualProvisioningCollectionUpdate { pub fn new_insert_storage( id: uuid::Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, project_id: uuid::Uuid, storage_type: crate::db::datastore::StorageType, ) -> Self { @@ -329,7 +330,7 @@ impl VirtualProvisioningCollectionUpdate { pub fn new_delete_storage( id: uuid::Uuid, - disk_byte_diff: i64, + disk_byte_diff: ByteCount, project_id: uuid::Uuid, ) -> Self { use virtual_provisioning_collection::dsl as collection_dsl; @@ -349,14 +350,14 @@ impl VirtualProvisioningCollectionUpdate { // ... We subtract the disk usage. collection_dsl::virtual_disk_bytes_provisioned .eq(collection_dsl::virtual_disk_bytes_provisioned - + disk_byte_diff), + - disk_byte_diff), ) } pub fn new_insert_instance( id: uuid::Uuid, cpus_diff: i64, - ram_diff: i64, + ram_diff: ByteCount, project_id: uuid::Uuid, ) -> Self { use virtual_provisioning_collection::dsl as collection_dsl; @@ -396,7 +397,7 @@ impl VirtualProvisioningCollectionUpdate { pub fn new_delete_instance( id: uuid::Uuid, cpus_diff: i64, - ram_diff: i64, + ram_diff: ByteCount, project_id: uuid::Uuid, ) -> Self { use virtual_provisioning_collection::dsl as collection_dsl; @@ -416,9 +417,9 @@ impl VirtualProvisioningCollectionUpdate { // ... We update the resource usage. ( collection_dsl::cpus_provisioned - .eq(collection_dsl::cpus_provisioned + cpus_diff), + .eq(collection_dsl::cpus_provisioned - cpus_diff), collection_dsl::ram_provisioned - .eq(collection_dsl::ram_provisioned + ram_diff), + .eq(collection_dsl::ram_provisioned - ram_diff), ), ) } diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index e279ba34b09..a0ffc75f6fd 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -928,7 +928,9 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), 0 ); let virtual_provisioning_collection = datastore @@ -936,7 +938,9 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), 0 ); let virtual_provisioning_collection = datastore @@ -944,7 +948,9 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), 0 ); let virtual_provisioning_collection = datastore @@ -952,7 +958,9 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), 0 ); let virtual_provisioning_collection = datastore @@ -960,7 +968,9 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned 
+ .to_bytes(), 0 ); @@ -995,15 +1005,17 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); let virtual_provisioning_collection = datastore .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), 0 ); let virtual_provisioning_collection = datastore @@ -1011,24 +1023,24 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); let virtual_provisioning_collection = datastore .virtual_provisioning_collection_get(&opctx, *SILO_ID) .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); let virtual_provisioning_collection = datastore .virtual_provisioning_collection_get(&opctx, *FLEET_ID) .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); // Ask for a 1 gibibyte disk in the second project. @@ -1063,24 +1075,26 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); let virtual_provisioning_collection = datastore .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); let virtual_provisioning_collection = datastore .virtual_provisioning_collection_get(&opctx, org_id) .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - 2 * disk_size.to_bytes() as i64 + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), + 2 * disk_size.to_bytes() ); // Delete the disk we just created, observe the utilization drop @@ -1096,15 +1110,17 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size ); let virtual_provisioning_collection = datastore .virtual_provisioning_collection_get(&opctx, project_id2) .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, + virtual_provisioning_collection + .virtual_disk_bytes_provisioned + .to_bytes(), 0 ); let virtual_provisioning_collection = datastore @@ -1112,8 +1128,8 @@ async fn test_disk_virtual_provisioning_collection( .await .unwrap(); assert_eq!( - virtual_provisioning_collection.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 + virtual_provisioning_collection.virtual_disk_bytes_provisioned.0, + disk_size, ); } diff --git 
a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 7dc30bee434..fb181824403 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -547,7 +547,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); assert_eq!(virtual_provisioning_collection.cpus_provisioned, 0); - assert_eq!(virtual_provisioning_collection.ram_provisioned, 0); + assert_eq!(virtual_provisioning_collection.ram_provisioned.to_bytes(), 0); // Query the view of these metrics stored within Clickhouse let metric_url = |metric_type: &str, id: Uuid| { @@ -592,8 +592,8 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .unwrap(); assert_eq!(virtual_provisioning_collection.cpus_provisioned, 4); assert_eq!( - virtual_provisioning_collection.ram_provisioned, - i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(), + virtual_provisioning_collection.ram_provisioned.0, + ByteCount::from_gibibytes_u32(1), ); // Stop the instance @@ -617,7 +617,10 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let expected_ram = i64::try_from(ByteCount::from_gibibytes_u32(1).to_bytes()).unwrap(); assert_eq!(virtual_provisioning_collection.cpus_provisioned, expected_cpus); - assert_eq!(virtual_provisioning_collection.ram_provisioned, expected_ram,); + assert_eq!( + i64::from(virtual_provisioning_collection.ram_provisioned.0), + expected_ram + ); oximeter.force_collect().await; for id in vec![organization_id, project_id] { query_for_metrics_until_it_contains( @@ -648,7 +651,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); assert_eq!(virtual_provisioning_collection.cpus_provisioned, 0); - assert_eq!(virtual_provisioning_collection.ram_provisioned, 0); + assert_eq!(virtual_provisioning_collection.ram_provisioned.to_bytes(), 0); oximeter.force_collect().await; for id in vec![organization_id, project_id] { query_for_metrics_until_it_contains( diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 47c14998fee..fcfe7118386 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -327,10 +327,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!( - provision.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 - ); + assert_eq!(provision.virtual_disk_bytes_provisioned.0, disk_size); // Issue snapshot request let snapshots_url = format!( @@ -358,8 +355,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); assert_eq!( - provision.virtual_disk_bytes_provisioned, - 2 * disk_size.to_bytes() as i64 + provision.virtual_disk_bytes_provisioned.to_bytes(), + 2 * disk_size.to_bytes() ); // Create a disk from this snapshot @@ -393,8 +390,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); assert_eq!( - provision.virtual_disk_bytes_provisioned, - 3 * disk_size.to_bytes() as i64 + provision.virtual_disk_bytes_provisioned.to_bytes(), + 3 * disk_size.to_bytes() ); // Delete snapshot @@ -417,8 +414,8 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); assert_eq!( - provision.virtual_disk_bytes_provisioned, - 2 * disk_size.to_bytes() as i64 + provision.virtual_disk_bytes_provisioned.to_bytes(), + 2 * 
disk_size.to_bytes() ); // Delete the disk using the snapshot @@ -432,10 +429,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!( - provision.virtual_disk_bytes_provisioned, - disk_size.to_bytes() as i64 - ); + assert_eq!(provision.virtual_disk_bytes_provisioned.0, disk_size); // Delete the original base disk let disk_url = format!("{}/{}", disks_url, base_disk_name); @@ -448,7 +442,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { .virtual_provisioning_collection_get(&opctx, project_id) .await .unwrap(); - assert_eq!(provision.virtual_disk_bytes_provisioned, 0); + assert_eq!(provision.virtual_disk_bytes_provisioned.to_bytes(), 0); } // Test the various ways Nexus can reject a disk created from a snapshot From 4e6ea232bb8424d1f5666cdbe779b02ac596793b Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 10 Jan 2023 23:28:36 -0500 Subject: [PATCH 56/80] silo not found when inserting returns 404 --- nexus/src/db/datastore/organization.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs index a30a1e70505..74c93c00b2a 100644 --- a/nexus/src/db/datastore/organization.rs +++ b/nexus/src/db/datastore/organization.rs @@ -6,6 +6,7 @@ use super::DataStore; use crate::authz; +use crate::authz::ApiResource; use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; @@ -65,13 +66,7 @@ impl DataStore { .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => { - Error::InternalError { - internal_message: format!( - "attempting to create an \ - organization under non-existent silo {}", - silo_id - ), - } + authz_silo.not_found() } AsyncInsertError::DatabaseError(e) => { public_error_from_diesel_pool( From e954c771e0572c3a8cb2ce68657b10211a68b7b1 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 11 Jan 2023 00:04:43 -0500 Subject: [PATCH 57/80] system_metric_lookup in app layer --- nexus/src/app/metrics.rs | 39 ++++++++++++++++++++++ nexus/src/app/mod.rs | 1 + nexus/src/external_api/http_entrypoints.rs | 39 ++-------------------- 3 files changed, 43 insertions(+), 36 deletions(-) create mode 100644 nexus/src/app/metrics.rs diff --git a/nexus/src/app/metrics.rs b/nexus/src/app/metrics.rs new file mode 100644 index 00000000000..947c8ef1a6f --- /dev/null +++ b/nexus/src/app/metrics.rs @@ -0,0 +1,39 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Metrics
+
+use crate::authz;
+use crate::context::OpContext;
+use crate::external_api::http_entrypoints::SystemMetricName;
+use crate::external_api::http_entrypoints::SystemMetricParams;
+use omicron_common::api::external::Error;
+use oximeter_db::Measurement;
+use std::num::NonZeroU32;
+
+impl super::Nexus {
+    pub async fn system_metric_lookup(
+        &self,
+        opctx: &OpContext,
+        metric_name: SystemMetricName,
+        query: SystemMetricParams,
+        limit: NonZeroU32,
+    ) -> Result<Vec<Measurement>, Error> {
+        let timeseries = match metric_name {
+            SystemMetricName::VirtualDiskSpaceProvisioned
+            | SystemMetricName::CpusProvisioned
+            | SystemMetricName::RamProvisioned => {
+                opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
+                format!("collection_target:{metric_name}")
+            }
+        };
+        self.select_timeseries(
+            &timeseries,
+            &[&format!("id=={}", query.id)],
+            query.pagination,
+            limit,
+        )
+        .await
+    }
+}
diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs
index 2c16a4a9f4d..ccf34243834 100644
--- a/nexus/src/app/mod.rs
+++ b/nexus/src/app/mod.rs
@@ -30,6 +30,7 @@ mod iam;
 mod image;
 mod instance;
 mod ip_pool;
+mod metrics;
 mod organization;
 mod oximeter;
 mod project;
diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 4a10fc2d321..e8639071dc6 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -5133,42 +5133,9 @@ async fn system_metric(
 
     let handler = async {
         let opctx = OpContext::for_external_api(&rqctx).await?;
-
-        let result = match metric_name {
-            SystemMetricName::VirtualDiskSpaceProvisioned => {
-                opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
-                nexus
-                    .select_timeseries(
-                        "collection_target:virtual_disk_space_provisioned",
-                        &[&format!("id=={}", query.id)],
-                        query.pagination,
-                        limit,
-                    )
-                    .await?
-            }
-            SystemMetricName::CpusProvisioned => {
-                opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
-                nexus
-                    .select_timeseries(
-                        "collection_target:cpus_provisioned",
-                        &[&format!("id=={}", query.id)],
-                        query.pagination,
-                        limit,
-                    )
-                    .await?
-            }
-            SystemMetricName::RamProvisioned => {
-                opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
-                nexus
-                    .select_timeseries(
-                        "collection_target:ram_provisioned",
-                        &[&format!("id=={}", query.id)],
-                        query.pagination,
-                        limit,
-                    )
-                    .await?
- } - }; + let result = nexus + .system_metric_lookup(&opctx, metric_name, query, limit) + .await?; Ok(HttpResponseOk(result)) }; From fc81adaed1910ec006c7eee9a9b27339ba4a90d0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 11 Jan 2023 00:08:16 -0500 Subject: [PATCH 58/80] Remove ResourceType::VirtualProvision --- common/src/api/external/mod.rs | 1 - .../db/datastore/virtual_provisioning_collection.rs | 12 ++---------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 529a1c6ba70..6d988c61397 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -616,7 +616,6 @@ pub enum ResourceType { RoleBuiltin, UpdateAvailableArtifact, UserBuiltin, - VirtualProvision, Zpool, } diff --git a/nexus/src/db/datastore/virtual_provisioning_collection.rs b/nexus/src/db/datastore/virtual_provisioning_collection.rs index 7a792a2a1a9..df67e2351fa 100644 --- a/nexus/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/src/db/datastore/virtual_provisioning_collection.rs @@ -15,9 +15,7 @@ use crate::db::pool::DbConnection; use crate::db::queries::virtual_provisioning_collection_update::VirtualProvisioningCollectionUpdate; use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use diesel::prelude::*; -use omicron_common::api::external::{ - DeleteResult, Error, LookupType, ResourceType, -}; +use omicron_common::api::external::{DeleteResult, Error}; use oximeter::{types::Sample, Metric, MetricsError, Target}; use std::sync::{Arc, Mutex}; use uuid::Uuid; @@ -207,13 +205,7 @@ impl DataStore { .get_result_async(self.pool_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::VirtualProvision, - LookupType::ById(id), - ), - ) + public_error_from_diesel_pool(e, ErrorHandler::Server) })?; Ok(virtual_provisioning_collection) } From 6837d177af53c26aadc4647305bfa57a66b20269 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 11 Jan 2023 01:14:08 -0500 Subject: [PATCH 59/80] Simplify nexus testing logic --- nexus/tests/integration_tests/disks.rs | 60 +++++++------ nexus/tests/integration_tests/instances.rs | 98 ++++++++++++---------- nexus/tests/integration_tests/metrics.rs | 65 ++++---------- 3 files changed, 107 insertions(+), 116 deletions(-) diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index a0ffc75f6fd..37ac3eb920d 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -4,13 +4,14 @@ //! Tests basic disk support in the API -use super::metrics::query_for_metrics_until_they_exist; +use super::metrics::{ + query_for_latest_metric, query_for_metrics_until_they_exist, +}; use chrono::Utc; use crucible_agent_client::types::State as RegionState; use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; -use dropshot::ResultsPage; use http::method::Method; use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; @@ -1393,7 +1394,8 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; let project_id = create_org_and_project(client).await; - create_disk(&client, ORG_NAME, PROJECT_NAME, DISK_NAME).await; + let disk = create_disk(&client, ORG_NAME, PROJECT_NAME, DISK_NAME).await; + oximeter.force_collect().await; // Whenever we grab this URL, get the surrounding few seconds of metrics. 
let metric_url = |metric_type: &str| { @@ -1404,16 +1406,31 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { Utc::now() + chrono::Duration::seconds(2), ) }; + // Check the utilization info for the whole project too. + let utilization_url = |id: Uuid| { + format!( + "/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}&id={:?}", + Utc::now() - chrono::Duration::seconds(2), + Utc::now() + chrono::Duration::seconds(2), + id, + ) + }; // Try accessing metrics before we attach the disk to an instance. // // Observe that no metrics exist yet; no "upstairs" should have been // instantiated on a sled. oximeter.force_collect().await; - let measurements: ResultsPage = - objects_list_page_authz(client, &metric_url("read")).await; + let measurements = + objects_list_page_authz::(client, &metric_url("read")) + .await; assert!(measurements.items.is_empty()); + assert_eq!( + query_for_latest_metric(client, &utilization_url(project_id),).await, + i64::from(disk.size) + ); + // Create an instance, attach the disk to it. create_instance_with_disk(client).await; oximeter.force_collect().await; @@ -1434,37 +1451,26 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { } // Check the utilization info for the whole project too. - let utilization_url = |id: Uuid| { - format!( - "/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}&id={:?}", - Utc::now() - chrono::Duration::seconds(20), - Utc::now() + chrono::Duration::seconds(20), - id, - ) - }; - - // We should create measurements when the disk is created, and again when - // it's modified. However, due to our inability to control the sampling - // rate, we just keep polling until we see *something*. - // - // Normally we'll see two measurements, but it's possible to only see one - // if the producer interface is queried in between the two samples. - let measurements = query_for_metrics_until_they_exist( - client, - &utilization_url(project_id), - ) - .await; - assert!(!measurements.items.is_empty()); + assert_eq!( + query_for_latest_metric(client, &utilization_url(project_id),).await, + i64::from(disk.size) + ); } #[nexus_test] async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { + // Normally, Nexus is not registered as a producer for tests. + // Turn this bit on so we can also test some metrics from Nexus itself. + cptestctx.server.register_as_producer().await; + let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; create_org_and_project(client).await; create_disk(&client, ORG_NAME, PROJECT_NAME, DISK_NAME).await; create_instance_with_disk(client).await; + let oximeter = &cptestctx.oximeter; + oximeter.force_collect().await; for metric in &ALL_METRICS { let collection_url = format!("{}/{DISK_NAME}/metrics/{metric}", get_disks_url()); @@ -1474,7 +1480,7 @@ async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { Utc::now() + chrono::Duration::seconds(2), ); - query_for_metrics_until_they_exist( + objects_list_page_authz::( client, &format!("{collection_url}?{initial_params}"), ) diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index fb181824403..6afe6850f67 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -4,7 +4,7 @@ //! 
Tests basic instance support in the API -use super::metrics::query_for_metrics_until_it_contains; +use super::metrics::query_for_latest_metric; use chrono::Utc; use http::method::Method; @@ -526,7 +526,7 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let nexus = &apictx.nexus; let datastore = nexus.datastore(); - // Create an IP pool and project that we'll use for testing. + // Create an IP pool and project that we'll use for testing. populate_ip_pool(&client, "default", None).await; let organization_id = create_organization(&client, ORGANIZATION_NAME).await.identity.id; @@ -559,27 +559,27 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { }; oximeter.force_collect().await; for id in vec![organization_id, project_id] { - query_for_metrics_until_it_contains( - client, - &metric_url("virtual_disk_space_provisioned", id), - 0, - 0, - ) - .await; - query_for_metrics_until_it_contains( - client, - &metric_url("cpus_provisioned", id), - 0, - 0, - ) - .await; - query_for_metrics_until_it_contains( + assert_eq!( + query_for_latest_metric( + client, + &metric_url("virtual_disk_space_provisioned", id), + ) + .await, + 0 + ); + assert_eq!( + query_for_latest_metric( + client, + &metric_url("cpus_provisioned", id), + ) + .await, + 0 + ); + assert_eq!(query_for_latest_metric( client, &metric_url("ram_provisioned", id), - 0, - 0, ) - .await; + .await, 0); } // Create an instance. @@ -623,20 +623,27 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { ); oximeter.force_collect().await; for id in vec![organization_id, project_id] { - query_for_metrics_until_it_contains( - client, - &metric_url("cpus_provisioned", id), - 1, - expected_cpus, - ) - .await; - query_for_metrics_until_it_contains( + assert_eq!( + query_for_latest_metric( + client, + &metric_url("virtual_disk_space_provisioned", id), + ) + .await, + 0 + ); + assert_eq!( + query_for_latest_metric( + client, + &metric_url("cpus_provisioned", id), + ) + .await, + expected_cpus + ); + assert_eq!(query_for_latest_metric( client, &metric_url("ram_provisioned", id), - 1, - expected_ram, ) - .await; + .await, expected_ram); } // Stop the instance @@ -654,20 +661,27 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { assert_eq!(virtual_provisioning_collection.ram_provisioned.to_bytes(), 0); oximeter.force_collect().await; for id in vec![organization_id, project_id] { - query_for_metrics_until_it_contains( - client, - &metric_url("cpus_provisioned", id), - 2, - 0, - ) - .await; - query_for_metrics_until_it_contains( + assert_eq!( + query_for_latest_metric( + client, + &metric_url("virtual_disk_space_provisioned", id), + ) + .await, + 0 + ); + assert_eq!( + query_for_latest_metric( + client, + &metric_url("cpus_provisioned", id), + ) + .await, + 0 + ); + assert_eq!(query_for_latest_metric( client, &metric_url("ram_provisioned", id), - 2, - 0, ) - .await; + .await, 0); } } diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index a196450ccb6..2a39892e05a 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -5,7 +5,6 @@ use dropshot::test_util::ClientTestContext; use dropshot::ResultsPage; use nexus_test_utils::resource_helpers::objects_list_page_authz; -use omicron_common::backoff; use oximeter::types::Datum; use oximeter::types::Measurement; @@ -13,56 +12,28 @@ pub async fn query_for_metrics_until_they_exist( client: &ClientTestContext, path: &str, ) -> ResultsPage { - 
backoff::retry_notify(
-        backoff::retry_policy_local(),
-        || async {
-            let measurements: ResultsPage<Measurement> =
-                objects_list_page_authz(client, path).await;
-
-            if measurements.items.is_empty() {
-                return Err(backoff::BackoffError::transient("No metrics yet"));
-            }
-            Ok(measurements)
-        },
-        |error, _| {
-            eprintln!("Failed to query {path}: {error}");
-        },
-    )
-    .await
-    .expect("Failed to query for measurements")
+    loop {
+        let measurements: ResultsPage<Measurement> =
+            objects_list_page_authz(client, path).await;
+
+        if !measurements.items.is_empty() {
+            return measurements;
+        }
+        tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
+    }
 }
 
-pub async fn query_for_metrics_until_it_contains(
+pub async fn query_for_latest_metric(
     client: &ClientTestContext,
     path: &str,
-    index: usize,
-    value: i64,
-) -> ResultsPage<Measurement> {
-    backoff::retry_notify(
-        backoff::retry_policy_local(),
-        || async {
-            let measurements: ResultsPage<Measurement> =
-                objects_list_page_authz(client, path).await;
-
-            if measurements.items.len() <= index {
-                return Err(backoff::BackoffError::transient(format!(
-                    "Not enough metrics yet (only seen: {:?})",
-                    measurements.items
-                )));
-            }
+) -> i64 {
+    let measurements: ResultsPage<Measurement> =
+        objects_list_page_authz(client, path).await;
 
-            let item = &measurements.items[index];
-            let datum = match item.datum() {
-                Datum::I64(c) => c,
-                _ => panic!("Unexpected datum type {:?}", item.datum()),
-            };
-            assert_eq!(*datum, value, "Datum exists, but has the wrong value");
-            Ok(measurements)
-        },
-        |error, _| {
-            eprintln!("Failed to query {path}: {error}");
-        },
-    )
-    .await
-    .expect("Failed to query for measurements")
+    let item = &measurements.items[measurements.items.len() - 1];
+    let datum = match item.datum() {
+        Datum::I64(c) => c,
+        _ => panic!("Unexpected datum type {:?}", item.datum()),
+    };
+    return *datum;
 }

From ae86c3374909ceea97de569d059c4a298f88ecc5 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 11 Jan 2023 12:42:03 -0500
Subject: [PATCH 60/80] tweak some test timings to fight flakes when under load

---
 nexus/tests/integration_tests/disks.rs | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs
index 37ac3eb920d..83d0be6e565 100644
--- a/nexus/tests/integration_tests/disks.rs
+++ b/nexus/tests/integration_tests/disks.rs
@@ -1402,16 +1402,16 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) {
         let disk_url = format!("{}/{}", get_disks_url(), DISK_NAME);
         format!(
             "{disk_url}/metrics/{metric_type}?start_time={:?}&end_time={:?}",
-            Utc::now() - chrono::Duration::seconds(2),
-            Utc::now() + chrono::Duration::seconds(2),
+            Utc::now() - chrono::Duration::seconds(10),
+            Utc::now() + chrono::Duration::seconds(10),
         )
     };
     // Check the utilization info for the whole project too.
     let utilization_url = |id: Uuid| {
         format!(
             "/system/metrics/virtual_disk_space_provisioned?start_time={:?}&end_time={:?}&id={:?}",
-            Utc::now() - chrono::Duration::seconds(2),
-            Utc::now() + chrono::Duration::seconds(2),
+            Utc::now() - chrono::Duration::seconds(10),
+            Utc::now() + chrono::Duration::seconds(10),
             id,
         )
     };
 
     // Try accessing metrics before we attach the disk to an instance.
     //
     // Observe that no metrics exist yet; no "upstairs" should have been
     // instantiated on a sled.
- oximeter.force_collect().await; let measurements = objects_list_page_authz::(client, &metric_url("read")) .await; assert!(measurements.items.is_empty()); assert_eq!( - query_for_latest_metric(client, &utilization_url(project_id),).await, + query_for_latest_metric(client, &utilization_url(project_id)).await, i64::from(disk.size) ); @@ -1452,7 +1451,7 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { // Check the utilization info for the whole project too. assert_eq!( - query_for_latest_metric(client, &utilization_url(project_id),).await, + query_for_latest_metric(client, &utilization_url(project_id)).await, i64::from(disk.size) ); } From 91fd7b8b0dd3c0962b265ad23874a99ce32b4928 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 11 Jan 2023 15:16:39 -0500 Subject: [PATCH 61/80] De-duplicate Nexus metrics producer --- nexus/src/app/mod.rs | 1 + nexus/src/app/provisioning.rs | 13 ++- nexus/src/db/datastore/mod.rs | 4 +- .../virtual_provisioning_collection.rs | 108 +----------------- 4 files changed, 11 insertions(+), 115 deletions(-) diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index ccf34243834..1f293e3790b 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -34,6 +34,7 @@ mod metrics; mod organization; mod oximeter; mod project; +pub mod provisioning; mod rack; pub mod saga; mod session; diff --git a/nexus/src/app/provisioning.rs b/nexus/src/app/provisioning.rs index a537706c7b4..dd6713dc409 100644 --- a/nexus/src/app/provisioning.rs +++ b/nexus/src/app/provisioning.rs @@ -20,19 +20,19 @@ struct CollectionTarget { #[derive(Debug, Clone, Metric)] struct VirtualDiskSpaceProvisioned { #[datum] - bytes_used: u64, + bytes_used: i64, } #[derive(Debug, Clone, Metric)] struct CpusProvisioned { #[datum] - cpus: u64, + cpus: i64, } #[derive(Debug, Clone, Metric)] struct RamProvisioned { #[datum] - bytes: u64, + bytes: i64, } /// An oximeter producer for reporting [`VirtualProvisioningCollection`] information to Clickhouse. 
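For a sense of how these pieces fit together: a target names the thing
being measured, a metric carries the datum, and the two combine into a
sample. A minimal sketch reusing the shapes defined in this file
(`collection_id` and the datum value are illustrative):

    // One sample per (collection, metric) pair; the target identifies
    // which collection the datum describes.
    let target = CollectionTarget { id: collection_id };
    let metric = CpusProvisioned { cpus: 4 };
    let sample = Sample::new(&target, &metric);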
@@ -69,7 +69,9 @@ impl Producer { Sample::new( &CollectionTarget { id: provision.id }, &VirtualDiskSpaceProvisioned { - bytes_used: provision.virtual_disk_bytes_provisioned, + bytes_used: provision + .virtual_disk_bytes_provisioned + .into(), }, ) }) @@ -93,7 +95,7 @@ impl Producer { .chain(provisions.iter().map(|provision| { Sample::new( &CollectionTarget { id: provision.id }, - &RamProvisioned { bytes: provision.ram_provisioned }, + &RamProvisioned { bytes: provision.ram_provisioned.into() }, ) })) .collect::>(); @@ -116,4 +118,3 @@ impl oximeter::Producer for Producer { Ok(Box::new(samples.into_iter())) } } - diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs index 5aeebe3e4cc..2c201e1b86d 100644 --- a/nexus/src/db/datastore/mod.rs +++ b/nexus/src/db/datastore/mod.rs @@ -107,7 +107,7 @@ impl RunnableQuery for T where pub struct DataStore { pool: Arc, virtual_provisioning_collection_producer: - virtual_provisioning_collection::Producer, + crate::app::provisioning::Producer, } // The majority of `DataStore`'s methods live in our submodules as a concession @@ -118,7 +118,7 @@ impl DataStore { DataStore { pool, virtual_provisioning_collection_producer: - virtual_provisioning_collection::Producer::new(), + crate::app::provisioning::Producer::new(), } } diff --git a/nexus/src/db/datastore/virtual_provisioning_collection.rs b/nexus/src/db/datastore/virtual_provisioning_collection.rs index df67e2351fa..da64f76c6f1 100644 --- a/nexus/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/src/db/datastore/virtual_provisioning_collection.rs @@ -16,112 +16,8 @@ use crate::db::queries::virtual_provisioning_collection_update::VirtualProvision use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; use diesel::prelude::*; use omicron_common::api::external::{DeleteResult, Error}; -use oximeter::{types::Sample, Metric, MetricsError, Target}; -use std::sync::{Arc, Mutex}; use uuid::Uuid; -/// Describes a collection that holds other resources. -/// -/// Example targets might include projects, organizations, silos or fleets. -#[derive(Debug, Clone, Target)] -struct CollectionTarget { - id: Uuid, -} - -#[derive(Debug, Clone, Metric)] -struct VirtualDiskSpaceProvisioned { - #[datum] - bytes_used: i64, -} - -#[derive(Debug, Clone, Metric)] -struct CpusProvisioned { - #[datum] - cpus: i64, -} - -#[derive(Debug, Clone, Metric)] -struct RamProvisioned { - #[datum] - bytes: i64, -} - -/// An oximeter producer for reporting [`VirtualProvisioningCollection`] information to Clickhouse. -/// -/// This producer collects samples whenever the database record for a collection -/// is created or updated. This implies that the CockroachDB record is always -/// kept up-to-date, and the Clickhouse historical records are batched and -/// transmitted once they are collected (as is the norm for Clickhouse metrics). 
-#[derive(Debug, Default, Clone)] -pub(crate) struct Producer { - samples: Arc>>, -} - -impl Producer { - pub fn new() -> Self { - Self { samples: Arc::new(Mutex::new(vec![])) } - } - - fn append_disk_metrics( - &self, - provisions: &Vec, - ) { - let new_samples = provisions - .iter() - .map(|provision| { - Sample::new( - &CollectionTarget { id: provision.id }, - &VirtualDiskSpaceProvisioned { - bytes_used: provision - .virtual_disk_bytes_provisioned - .into(), - }, - ) - }) - .collect::>(); - - self.append(new_samples); - } - - fn append_cpu_metrics( - &self, - provisions: &Vec, - ) { - let new_samples = provisions - .iter() - .map(|provision| { - Sample::new( - &CollectionTarget { id: provision.id }, - &CpusProvisioned { cpus: provision.cpus_provisioned }, - ) - }) - .chain(provisions.iter().map(|provision| { - Sample::new( - &CollectionTarget { id: provision.id }, - &RamProvisioned { bytes: provision.ram_provisioned.into() }, - ) - })) - .collect::>(); - - self.append(new_samples); - } - - fn append(&self, mut new_samples: Vec) { - let mut pending_samples = self.samples.lock().unwrap(); - pending_samples.append(&mut new_samples); - } -} - -impl oximeter::Producer for Producer { - fn produce( - &mut self, - ) -> Result + 'static>, MetricsError> { - let samples = - std::mem::replace(&mut *self.samples.lock().unwrap(), vec![]); - Ok(Box::new(samples.into_iter())) - } -} - /// The types of resources which can consume storage space. pub enum StorageType { Disk, @@ -185,9 +81,7 @@ impl DataStore { ) })?; self.virtual_provisioning_collection_producer - .append_disk_metrics(&provisions); - self.virtual_provisioning_collection_producer - .append_cpu_metrics(&provisions); + .append_all_metrics(&provisions); Ok(provisions) } From 292ee96afd95db34af291af53581d8e90d9f96b2 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 13 Jan 2023 10:46:28 -0500 Subject: [PATCH 62/80] Local to txn --- nexus/src/db/pool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/src/db/pool.rs b/nexus/src/db/pool.rs index fb44e9be200..3b5c243bf6a 100644 --- a/nexus/src/db/pool.rs +++ b/nexus/src/db/pool.rs @@ -74,7 +74,7 @@ const DISALLOW_FULL_TABLE_SCAN_SQL: &str = "set disallow_full_table_scans = on; set large_full_scan_rows = 0;"; #[cfg(test)] pub const ALLOW_FULL_TABLE_SCAN_SQL: &str = - "set disallow_full_table_scans = off; set large_full_scan_rows = 1000;"; + "set local disallow_full_table_scans = off; set local large_full_scan_rows = 1000;"; #[derive(Debug)] struct DisallowFullTableScans {} From 2ab2a3f5f0db9d1c252eee22bbe968f6528a3d63 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Fri, 13 Jan 2023 11:53:11 -0500 Subject: [PATCH 63/80] fix tests --- nexus/tests/integration_tests/instances.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index dcd69e0f702..c847ef24d75 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -586,8 +586,9 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { } // Create an instance. 
-    let instance_url = format!("{}/just-rainsticks", url_instances);
-    create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, "just-rainsticks")
+    let instance_name = "just-rainsticks";
+    let instance_url = format!("{url_instances}/{instance_name}");
+    create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, instance_name)
         .await;
     let virtual_provisioning_collection = datastore
         .virtual_provisioning_collection_get(&opctx, project_id)
@@ -601,7 +602,7 @@
 
     // Stop the instance
     let instance =
-        instance_post(&client, &instance_url, InstanceOp::Stop).await;
+        instance_post(&client, instance_name, InstanceOp::Stop).await;
     instance_simulate(nexus, &instance.identity.id).await;
     let instance = instance_get(&client, &instance_url).await;
     assert_eq!(instance.runtime.run_state, InstanceState::Stopped);
@@ -755,13 +756,9 @@
 
     // Create an instance.
     let instance_url = get_instance_url(instance_name);
-    let instance = create_instance(
-        client,
-        ORGANIZATION_NAME,
-        PROJECT_NAME,
-        "just-rainsticks",
-    )
-    .await;
+    let instance =
+        create_instance(client, ORGANIZATION_NAME, PROJECT_NAME, instance_name)
+            .await;
 
     // Simulate the instance booting.
     instance_simulate(nexus, &instance.identity.id).await;

From 5f534d6961275bba32606463dce75f8eab03a6b0 Mon Sep 17 00:00:00 2001
From: Alan Hanson
Date: Fri, 13 Jan 2023 08:55:51 -0800
Subject: [PATCH 64/80] Add a Nexus internal API endpoint for disk remove read
 only parent (#2157)

When we create a disk, the saga generates both a disk_id and a volume_id.
We use both ids when we create the disk database record. However, the
`volume_id` here is only used as the key to store the volume data in the
database. That volume_id is never exposed to anyone outside Nexus. For the
actual volume data structure and later the VolumeConstructionRequest, we
use the value in the disk_id, but call it volume_id.

However, Propolis (who needs to call Nexus and tell it to remove a read
only parent) only has the top level disk UUID. The internal volume_id is
not exposed. This adds a new internal API endpoint that will take the disk
UUID, then look up the volume_id and then call the volume layer remove
read only parent saga.

Adding a layer like this also allows tests that operate only on a volume
level to not require any changes.

Co-authored-by: Alan Hanson
---
 nexus/src/app/disk.rs | 22 ++++++++++++++++
 nexus/src/internal_api/http_entrypoints.rs | 26 +++++++++++++++++++
 openapi/nexus-internal.json | 29 ++++++++++++++++++++++
 3 files changed, 77 insertions(+)

diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs
index afa9c75c0af..5edd9d19aa2 100644
--- a/nexus/src/app/disk.rs
+++ b/nexus/src/app/disk.rs
@@ -521,4 +521,26 @@ impl super::Nexus {
         .await?;
         Ok(())
     }
+
+    /// Remove a read only parent from a disk.
+    /// This is just a wrapper around the volume operation of the same
+    /// name, but we provide this interface when all the caller has is
+    /// the disk UUID, as the internal volume_id is not exposed.
+    pub async fn disk_remove_read_only_parent(
+        self: &Arc<Self>,
+        opctx: &OpContext,
+        disk_id: Uuid,
+    ) -> DeleteResult {
+        // First get the internal volume ID that is stored in the disk
+        // database entry; once we have that, just call the volume method
+        // to remove the read only parent.
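+        // (Callers such as Propolis only ever see the disk UUID; the
+        // volume_id is internal to Nexus, which is why this wrapper
+        // endpoint exists at all.)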
+ let (.., db_disk) = LookupPath::new(opctx, &self.db_datastore) + .disk_id(disk_id) + .fetch() + .await?; + + self.volume_remove_read_only_parent(db_disk.volume_id).await?; + + Ok(()) + } } diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 909aecdd728..8f22df4ca43 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -42,6 +42,7 @@ pub fn internal_api() -> NexusApiDescription { api.register(cpapi_instances_put)?; api.register(cpapi_disks_put)?; api.register(cpapi_volume_remove_read_only_parent)?; + api.register(cpapi_disk_remove_read_only_parent)?; api.register(cpapi_producers_post)?; api.register(cpapi_collectors_post)?; api.register(cpapi_metrics_collect)?; @@ -227,6 +228,31 @@ async fn cpapi_volume_remove_read_only_parent( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Request removal of a read_only_parent from a disk +/// This is a thin wrapper around the volume_remove_read_only_parent saga. +/// All we are doing here is, given a disk UUID, figure out what the +/// volume_id is for that disk, then use that to call the +/// volume_remove_read_only_parent saga on it. +#[endpoint { + method = POST, + path = "/disk/{disk_id}/remove-read-only-parent", + }] +async fn cpapi_disk_remove_read_only_parent( + rqctx: Arc>>, + path_params: Path, +) -> Result { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + + let handler = async { + let opctx = OpContext::for_internal_api(&rqctx).await; + nexus.disk_remove_read_only_parent(&opctx, path.disk_id).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// Accept a registration from a new metric producer #[endpoint { method = POST, diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 48a2eb7e40d..b320e5cb1f8 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -59,6 +59,35 @@ } } }, + "/disk/{disk_id}/remove-read-only-parent": { + "post": { + "summary": "Request removal of a read_only_parent from a disk", + "description": "This is a thin wrapper around the volume_remove_read_only_parent saga. 
All we are doing here is, given a disk UUID, figure out what the volume_id is for that disk, then use that to call the volume_remove_read_only_parent saga on it.",
+        "operationId": "cpapi_disk_remove_read_only_parent",
+        "parameters": [
+          {
+            "in": "path",
+            "name": "disk_id",
+            "required": true,
+            "schema": {
+              "type": "string",
+              "format": "uuid"
+            }
+          }
+        ],
+        "responses": {
+          "204": {
+            "description": "resource updated"
+          },
+          "4XX": {
+            "$ref": "#/components/responses/Error"
+          },
+          "5XX": {
+            "$ref": "#/components/responses/Error"
+          }
+        }
+      }
+    },
     "/disks/{disk_id}": {
       "put": {
         "summary": "Report updated state for a disk.",

From ade45d51d9b4a011ca0b6ed4cb9dee1f960e6a62 Mon Sep 17 00:00:00 2001
From: Rain
Date: Fri, 13 Jan 2023 11:01:40 -0800
Subject: [PATCH 65/80] [wicket] add missing serde feature on hex dependency
 (#2163)

---
 wicket/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml
index b19b2c18c11..99507c87e2e 100644
--- a/wicket/Cargo.toml
+++ b/wicket/Cargo.toml
@@ -10,7 +10,7 @@ anyhow.workspace = true
 clap.workspace = true
 crossterm = { version = "0.25.0", features = ["event-stream"] }
 futures.workspace = true
-hex.workspace = true
+hex = { workspace = true, features = ["serde"] }
 reqwest.workspace = true
 semver = { version = "1.0.16", features = ["std", "serde"] }
 serde.workspace = true

From eaef8713b1d8ec8245b9d7b561d4090b4aaf9717 Mon Sep 17 00:00:00 2001
From: Rain
Date: Fri, 13 Jan 2023 13:10:25 -0800
Subject: [PATCH 66/80] [wicket] move wizard into its own file (#2164)

Currently, wizard is the main entrypoint for wicket. To support uploading
artifacts, we're going to change it so that it has to go through a clap
command layer.

Also add a `default-run` so that `cargo run -p wicket` runs the wicket
binary.

---
 wicket/Cargo.toml                   |   1 +
 wicket/src/lib.rs                   | 381 +--------------------------
 wicket/src/screens/component.rs     |  13 +-
 wicket/src/screens/mod.rs           |   7 +-
 wicket/src/screens/rack.rs          |  11 +-
 wicket/src/screens/splash.rs        |  18 +-
 wicket/src/wicketd.rs               |   3 +-
 wicket/src/widgets/screen_button.rs |   3 +-
 wicket/src/wizard.rs                | 383 ++++++++++++++++++++++++++++
 9 files changed, 401 insertions(+), 419 deletions(-)
 create mode 100644 wicket/src/wizard.rs

diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml
index 99507c87e2e..0f314569415 100644
--- a/wicket/Cargo.toml
+++ b/wicket/Cargo.toml
@@ -4,6 +4,7 @@ description = "Technician port TUI"
 version = "0.1.0"
 edition = "2021"
 license = "MPL-2.0"
+default-run = "wicket"
 
 [dependencies]
 anyhow.workspace = true
diff --git a/wicket/src/lib.rs b/wicket/src/lib.rs
index 71b669dcc6b..2da59d13367 100644
--- a/wicket/src/lib.rs
+++ b/wicket/src/lib.rs
@@ -9,389 +9,12 @@
 //! that will guide the user through the steps the need to take
 //! in an intuitive manner.
-use crossterm::event::Event as TermEvent; -use crossterm::event::EventStream; -use crossterm::event::{DisableMouseCapture, EnableMouseCapture}; -use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; -use crossterm::event::{MouseEvent, MouseEventKind}; -use crossterm::execute; -use crossterm::terminal::{ - disable_raw_mode, enable_raw_mode, EnterAlternateScreen, - LeaveAlternateScreen, -}; -use futures::StreamExt; -use slog::{error, info, Drain}; -use std::io::{stdout, Stdout}; -use std::net::SocketAddrV6; -use std::sync::mpsc::{channel, Receiver, Sender}; -use tokio::time::{interval, Duration}; -use tui::backend::CrosstermBackend; -use tui::Terminal; - pub(crate) mod defaults; pub(crate) mod inventory; mod screens; pub mod update; mod wicketd; mod widgets; +mod wizard; -use inventory::Inventory; -use screens::{Height, ScreenId, Screens}; -use wicketd::{WicketdHandle, WicketdManager}; -use wicketd_client::types::RackV1Inventory; -use widgets::RackState; - -pub const MARGIN: Height = Height(5); - -// We can avoid a bunch of unnecessary type parameters by picking them ahead of time. -pub type Term = Terminal>; -pub type Frame<'a> = tui::Frame<'a, CrosstermBackend>; - -/// The core type of this library is the `Wizard`. -/// -/// A `Wizard` manages a set of screens, where each screen represents a -/// specific step in the user process. Each screen is drawable, and the -/// active screen is rendered on every tick. The [`Wizard`] manages which -/// screen is active, issues the rendering operation to the terminal, and -/// communicates with other threads and async tasks to receive user input -/// and drive backend services. -pub struct Wizard { - // The currently active screen - active_screen: ScreenId, - - // All the screens managed by the [`Wizard`] - screens: Screens, - - // The [`Wizard`] is purely single threaded. Every interaction with the - // outside world is via channels. All receiving from the outside world - // comes in via an `Event` over a single channel. - // - // Doing this allows us to record and replay all received events, which - // will deterministically draw the output of the UI, as long as we disable - // any output to downstream services. - // - // This effectively acts as a way to mock real responses from servers - // without ever having to run those servers or even send the requests that - // triggered the incoming events! - // - // Note that for resize events or other terminal specific events we'll - // likely have to "output" them to fake the same interaction. - events_rx: Receiver, - - // We save a copy here so we can hand it out to event producers - events_tx: Sender, - - // The internal state of the Wizard - // This contains all updatable data - state: State, - - // A mechanism for interacting with `wicketd` - #[allow(unused)] - wicketd: WicketdHandle, - - // When the Wizard is run, this will be extracted and moved - // into a tokio task. - wicketd_manager: Option, - - // The terminal we are rendering to - terminal: Term, - - // Our friendly neighborhood logger - log: slog::Logger, - - // The tokio runtime for everything outside the main thread - tokio_rt: tokio::runtime::Runtime, -} - -#[allow(clippy::new_without_default)] -impl Wizard { - pub fn new() -> Wizard { - // TODO: make this configurable? 
- let wicketd_addr: SocketAddrV6 = "[::1]:8000".parse().unwrap(); - let log = Self::setup_log("/tmp/wicket.log").unwrap(); - let screens = Screens::new(&log); - let (events_tx, events_rx) = channel(); - let state = State::new(); - let backend = CrosstermBackend::new(stdout()); - let terminal = Terminal::new(backend).unwrap(); - let tokio_rt = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(); - let (wicketd, wicketd_manager) = - WicketdManager::new(&log, events_tx.clone(), wicketd_addr); - Wizard { - screens, - active_screen: ScreenId::Splash, - events_rx, - events_tx, - state, - wicketd, - wicketd_manager: Some(wicketd_manager), - terminal, - log, - tokio_rt, - } - } - - pub fn setup_log(path: &str) -> anyhow::Result { - let file = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(path)?; - - let decorator = slog_term::PlainDecorator::new(file); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - Ok(slog::Logger::root(drain, slog::o!())) - } - - pub fn run(&mut self) -> anyhow::Result<()> { - self.start_tokio_runtime(); - enable_raw_mode()?; - execute!( - self.terminal.backend_mut(), - EnterAlternateScreen, - EnableMouseCapture - )?; - self.mainloop()?; - disable_raw_mode()?; - execute!( - self.terminal.backend_mut(), - LeaveAlternateScreen, - DisableMouseCapture - )?; - Ok(()) - } - - fn mainloop(&mut self) -> anyhow::Result<()> { - info!(self.log, "Starting main loop"); - let rect = self.terminal.get_frame().size(); - // Size the rack for the initial draw - self.state.rack_state.resize(rect.width, rect.height, &MARGIN); - - // Draw the initial screen - let screen = self.screens.get_mut(self.active_screen); - screen.resize(&mut self.state, rect.width, rect.height); - screen.draw(&self.state, &mut self.terminal)?; - - loop { - let screen = self.screens.get_mut(self.active_screen); - // unwrap is safe because we always hold onto a Sender - let event = self.events_rx.recv().unwrap(); - match event { - Event::Tick => { - let actions = screen.on(&mut self.state, ScreenEvent::Tick); - self.handle_actions(actions)?; - } - Event::Term(TermEvent::Key(key_event)) => { - if is_control_c(&key_event) { - info!(self.log, "CTRL-C Pressed. Exiting."); - break; - } - let actions = screen.on( - &mut self.state, - ScreenEvent::Term(TermEvent::Key(key_event)), - ); - self.handle_actions(actions)?; - } - Event::Term(TermEvent::Resize(width, height)) => { - self.state.rack_state.resize(width, height, &MARGIN); - screen.resize(&mut self.state, width, height); - screen.draw(&self.state, &mut self.terminal)?; - } - Event::Term(TermEvent::Mouse(mouse_event)) => { - self.state.mouse = - Point { x: mouse_event.column, y: mouse_event.row }; - let actions = screen.on( - &mut self.state, - ScreenEvent::Term(TermEvent::Mouse(mouse_event)), - ); - self.handle_actions(actions)?; - } - Event::Inventory(inventory) => { - if let Err(e) = - self.state.inventory.update_inventory(inventory) - { - error!(self.log, "Failed to update inventory: {e}",); - } else { - // Inventory changed. Redraw the screen. 
- screen.draw(&self.state, &mut self.terminal)?; - } - } - _ => info!(self.log, "{:?}", event), - } - } - Ok(()) - } - - fn handle_actions(&mut self, actions: Vec) -> anyhow::Result<()> { - for action in actions { - match action { - Action::Redraw => { - let screen = self.screens.get_mut(self.active_screen); - screen.draw(&self.state, &mut self.terminal)?; - } - Action::SwitchScreen(id) => { - self.active_screen = id; - let screen = self.screens.get_mut(id); - let rect = self.terminal.get_frame().size(); - - screen.resize(&mut self.state, rect.width, rect.height); - - // Simulate a mouse movement for the current position - // because the mouse may be in a different position when transitioning - // between screens. - let mouse_event = MouseEvent { - kind: MouseEventKind::Moved, - column: self.state.mouse.x, - row: self.state.mouse.y, - modifiers: KeyModifiers::NONE, - }; - let event = - ScreenEvent::Term(TermEvent::Mouse(mouse_event)); - // We ignore actions, as they can only be draw actions, and - // we are about to draw. - let _ = screen.on(&mut self.state, event); - screen.draw(&self.state, &mut self.terminal)?; - } - } - } - Ok(()) - } - - fn start_tokio_runtime(&mut self) { - let events_tx = self.events_tx.clone(); - let log = self.log.clone(); - let wicketd_manager = self.wicketd_manager.take().unwrap(); - self.tokio_rt.block_on(async { - run_event_listener(log.clone(), events_tx).await; - tokio::spawn(async move { - wicketd_manager.run().await; - }); - }); - } -} - -fn is_control_c(key_event: &KeyEvent) -> bool { - key_event.code == KeyCode::Char('c') - && key_event.modifiers == KeyModifiers::CONTROL -} - -/// Listen for terminal related events -async fn run_event_listener(log: slog::Logger, events_tx: Sender) { - info!(log, "Starting event listener"); - tokio::spawn(async move { - let mut events = EventStream::new(); - let mut ticker = interval(Duration::from_millis(30)); - loop { - tokio::select! { - _ = ticker.tick() => { - if events_tx.send(Event::Tick).is_err() { - info!(log, "Event listener completed"); - // The receiver was dropped. Program is ending. - return; - } - } - event = events.next() => { - let event = match event { - None => { - error!(log, "Event stream completed. Shutting down."); - return; - } - Some(Ok(event)) => event, - Some(Err(e)) => { - // TODO: Issue a shutdown - error!(log, "Failed to receive event: {:?}", e); - return; - } - }; - if events_tx.send(Event::Term(event)).is_err() { - info!(log, "Event listener completed"); - // The receiver was dropped. Program is ending. - return; - } - - } - } - } - }); -} - -#[derive(Debug, Clone, Copy, Default)] -pub struct Point { - pub x: u16, - pub y: u16, -} - -/// The data state of the Wizard -/// -/// Data is not tied to any specific screen and is updated upon event receipt. -#[derive(Debug)] -pub struct State { - pub inventory: Inventory, - pub rack_state: RackState, - pub mouse: Point, -} - -impl Default for State { - fn default() -> Self { - Self::new() - } -} - -impl State { - pub fn new() -> State { - State { - inventory: Inventory::default(), - rack_state: RackState::new(), - mouse: Point::default(), - } - } -} - -/// Send requests to RSS -/// -/// Replies come in as [`Event`]s -pub struct RssManager {} - -/// An event that will update state in the wizard -/// -/// This can be a keypress, mouse event, or response from a downstream service. 
-#[derive(Debug)] -pub enum Event { - /// An input event from the terminal - Term(TermEvent), - - /// An Inventory Update Event - Inventory(RackV1Inventory), - - /// The tick of a Timer - /// This can be used to draw a frame to the terminal - Tick, - //... TODO: Replies from MGS & RSS -} - -/// An action for the system to take. -/// -/// This can be something like a screen transition or calling a downstream -/// service. Screens never take actions directly, but they are the only ones -/// that know what visual content an input such as a key press or mouse event -/// is meant for and what action should be taken in that case. -pub enum Action { - Redraw, - SwitchScreen(ScreenId), -} - -/// Events sent to a screen -/// -/// These are a subset of [`Event`] -pub enum ScreenEvent { - /// An input event from the terminal - Term(crossterm::event::Event), - - /// The tick of a timer - Tick, -} +pub use crate::wizard::*; diff --git a/wicket/src/screens/component.rs b/wicket/src/screens/component.rs index 197b4d1a222..c57604e5ce8 100644 --- a/wicket/src/screens/component.rs +++ b/wicket/src/screens/component.rs @@ -9,16 +9,13 @@ use crate::defaults::colors::*; use crate::defaults::dimensions::RectExt; use crate::defaults::dimensions::MENUBAR_HEIGHT; use crate::defaults::style; +use crate::screens::ScreenId; use crate::widgets::Control; use crate::widgets::ControlId; use crate::widgets::HelpMenuState; use crate::widgets::{HelpButton, HelpButtonState, HelpMenu}; use crate::widgets::{ScreenButton, ScreenButtonState}; -use crate::Action; -use crate::Frame; -use crate::ScreenEvent; -use crate::ScreenId; -use crate::State; +use crate::wizard::{Action, Frame, ScreenEvent, State, Term}; use crossterm::event::Event as TermEvent; use crossterm::event::{ KeyCode, KeyEvent, KeyModifiers, MouseButton, MouseEvent, MouseEventKind, @@ -303,11 +300,7 @@ impl ComponentScreen { } impl Screen for ComponentScreen { - fn draw( - &self, - state: &State, - terminal: &mut crate::Term, - ) -> anyhow::Result<()> { + fn draw(&self, state: &State, terminal: &mut Term) -> anyhow::Result<()> { terminal.draw(|f| { self.draw_background(f); self.draw_menubar(f, state); diff --git a/wicket/src/screens/mod.rs b/wicket/src/screens/mod.rs index 5e202046e4b..2e5a5a5f1dd 100644 --- a/wicket/src/screens/mod.rs +++ b/wicket/src/screens/mod.rs @@ -6,11 +6,8 @@ mod component; mod rack; mod splash; -use crate::Action; -use crate::ScreenEvent; -use crate::State; -use crate::Term; -use crate::TermEvent; +use crate::wizard::{Action, ScreenEvent, State, Term}; +use crossterm::event::Event as TermEvent; use slog::Logger; use component::ComponentScreen; diff --git a/wicket/src/screens/rack.rs b/wicket/src/screens/rack.rs index eee384b7d16..11cee49a7be 100644 --- a/wicket/src/screens/rack.rs +++ b/wicket/src/screens/rack.rs @@ -12,10 +12,7 @@ use crate::widgets::Control; use crate::widgets::ControlId; use crate::widgets::HelpMenuState; use crate::widgets::{Banner, HelpButton, HelpButtonState, HelpMenu, Rack}; -use crate::Action; -use crate::Frame; -use crate::ScreenEvent; -use crate::State; +use crate::wizard::{Action, Frame, ScreenEvent, State, Term}; use crossterm::event::Event as TermEvent; use crossterm::event::{ KeyCode, KeyEvent, KeyModifiers, MouseButton, MouseEvent, MouseEventKind, @@ -276,11 +273,7 @@ impl RackScreen { } impl Screen for RackScreen { - fn draw( - &self, - state: &State, - terminal: &mut crate::Term, - ) -> anyhow::Result<()> { + fn draw(&self, state: &State, terminal: &mut Term) -> anyhow::Result<()> { terminal.draw(|f| { 
self.draw_background(f); self.draw_rack(state, f); diff --git a/wicket/src/screens/splash.rs b/wicket/src/screens/splash.rs index 4e489567f77..dd7b170f931 100644 --- a/wicket/src/screens/splash.rs +++ b/wicket/src/screens/splash.rs @@ -10,10 +10,8 @@ use super::{Screen, ScreenId}; use crate::defaults::colors::*; use crate::defaults::dimensions::RectExt; use crate::widgets::{Logo, LogoState, LOGO_HEIGHT, LOGO_WIDTH}; -use crate::Action; -use crate::Frame; -use crate::ScreenEvent; -use crate::TermEvent; +use crate::wizard::{Action, Frame, ScreenEvent, State, Term}; +use crossterm::event::Event as TermEvent; use tui::style::{Color, Style}; use tui::widgets::Block; @@ -61,11 +59,7 @@ impl SplashScreen { } impl Screen for SplashScreen { - fn draw( - &self, - _state: &crate::State, - terminal: &mut crate::Term, - ) -> anyhow::Result<()> { + fn draw(&self, _state: &State, terminal: &mut Term) -> anyhow::Result<()> { terminal.draw(|f| { self.draw_background(f); self.animate_logo(f); @@ -73,11 +67,7 @@ impl Screen for SplashScreen { Ok(()) } - fn on( - &mut self, - _state: &mut crate::State, - event: ScreenEvent, - ) -> Vec { + fn on(&mut self, _state: &mut State, event: ScreenEvent) -> Vec { match event { ScreenEvent::Tick => { self.state.frame += 1; diff --git a/wicket/src/wicketd.rs b/wicket/src/wicketd.rs index 9529b525870..51db0ab741f 100644 --- a/wicket/src/wicketd.rs +++ b/wicket/src/wicketd.rs @@ -4,7 +4,6 @@ //! Code for talking to wicketd -use crate::Event; use slog::{debug, o, warn, Logger}; use std::net::SocketAddrV6; use std::sync::mpsc::Sender; @@ -12,6 +11,8 @@ use tokio::sync::mpsc; use tokio::time::{interval, Duration, MissedTickBehavior}; use wicketd_client::types::RackV1Inventory; +use crate::wizard::Event; + const WICKETD_POLL_INTERVAL: Duration = Duration::from_secs(5); const WICKETD_TIMEOUT_MS: u32 = 1000; diff --git a/wicket/src/widgets/screen_button.rs b/wicket/src/widgets/screen_button.rs index cc2fd297cc9..41178a4de45 100644 --- a/wicket/src/widgets/screen_button.rs +++ b/wicket/src/widgets/screen_button.rs @@ -4,10 +4,11 @@ //! A help button that brings up a help menu when selected +use crate::screens::ScreenId; + use super::get_control_id; use super::Control; use super::ControlId; -use crate::ScreenId; use tui::buffer::Buffer; use tui::layout::Rect; use tui::style::Style; diff --git a/wicket/src/wizard.rs b/wicket/src/wizard.rs new file mode 100644 index 00000000000..91025c08ac7 --- /dev/null +++ b/wicket/src/wizard.rs @@ -0,0 +1,383 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
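+
+//! The interactive setup wizard: the [`Wizard`] event loop and terminal
+//! handling, its [`State`], and the [`Event`]/[`Action`] types that
+//! screens use to communicate with it.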
+ +use crossterm::event::Event as TermEvent; +use crossterm::event::EventStream; +use crossterm::event::{DisableMouseCapture, EnableMouseCapture}; +use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; +use crossterm::event::{MouseEvent, MouseEventKind}; +use crossterm::execute; +use crossterm::terminal::{ + disable_raw_mode, enable_raw_mode, EnterAlternateScreen, + LeaveAlternateScreen, +}; +use futures::StreamExt; +use slog::{error, info, Drain}; +use std::io::{stdout, Stdout}; +use std::net::SocketAddrV6; +use std::sync::mpsc::{channel, Receiver, Sender}; +use tokio::time::{interval, Duration}; +use tui::backend::CrosstermBackend; +use tui::Terminal; + +use crate::inventory::Inventory; +use crate::screens::{Height, ScreenId, Screens}; +use crate::wicketd::{WicketdHandle, WicketdManager}; +use crate::widgets::RackState; +use wicketd_client::types::RackV1Inventory; + +pub const MARGIN: Height = Height(5); + +// We can avoid a bunch of unnecessary type parameters by picking them ahead of time. +pub type Term = Terminal>; +pub type Frame<'a> = tui::Frame<'a, CrosstermBackend>; + +/// The core type of this library is the `Wizard`. +/// +/// A `Wizard` manages a set of screens, where each screen represents a +/// specific step in the user process. Each screen is drawable, and the +/// active screen is rendered on every tick. The [`Wizard`] manages which +/// screen is active, issues the rendering operation to the terminal, and +/// communicates with other threads and async tasks to receive user input +/// and drive backend services. +pub struct Wizard { + // The currently active screen + active_screen: ScreenId, + + // All the screens managed by the [`Wizard`] + screens: Screens, + + // The [`Wizard`] is purely single threaded. Every interaction with the + // outside world is via channels. All receiving from the outside world + // comes in via an `Event` over a single channel. + // + // Doing this allows us to record and replay all received events, which + // will deterministically draw the output of the UI, as long as we disable + // any output to downstream services. + // + // This effectively acts as a way to mock real responses from servers + // without ever having to run those servers or even send the requests that + // triggered the incoming events! + // + // Note that for resize events or other terminal specific events we'll + // likely have to "output" them to fake the same interaction. + events_rx: Receiver, + + // We save a copy here so we can hand it out to event producers + events_tx: Sender, + + // The internal state of the Wizard + // This contains all updatable data + state: State, + + // A mechanism for interacting with `wicketd` + #[allow(unused)] + wicketd: WicketdHandle, + + // When the Wizard is run, this will be extracted and moved + // into a tokio task. + wicketd_manager: Option, + + // The terminal we are rendering to + terminal: Term, + + // Our friendly neighborhood logger + log: slog::Logger, + + // The tokio runtime for everything outside the main thread + tokio_rt: tokio::runtime::Runtime, +} + +#[allow(clippy::new_without_default)] +impl Wizard { + pub fn new() -> Wizard { + // TODO: make this configurable? 
+ let wicketd_addr: SocketAddrV6 = "[::1]:8000".parse().unwrap(); + let log = Self::setup_log("/tmp/wicket.log").unwrap(); + let screens = Screens::new(&log); + let (events_tx, events_rx) = channel(); + let state = State::new(); + let backend = CrosstermBackend::new(stdout()); + let terminal = Terminal::new(backend).unwrap(); + let tokio_rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + let (wicketd, wicketd_manager) = + WicketdManager::new(&log, events_tx.clone(), wicketd_addr); + Wizard { + screens, + active_screen: ScreenId::Splash, + events_rx, + events_tx, + state, + wicketd, + wicketd_manager: Some(wicketd_manager), + terminal, + log, + tokio_rt, + } + } + + pub fn setup_log(path: &str) -> anyhow::Result { + let file = std::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path)?; + + let decorator = slog_term::PlainDecorator::new(file); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + + Ok(slog::Logger::root(drain, slog::o!())) + } + + pub fn run(&mut self) -> anyhow::Result<()> { + self.start_tokio_runtime(); + enable_raw_mode()?; + execute!( + self.terminal.backend_mut(), + EnterAlternateScreen, + EnableMouseCapture + )?; + self.mainloop()?; + disable_raw_mode()?; + execute!( + self.terminal.backend_mut(), + LeaveAlternateScreen, + DisableMouseCapture + )?; + Ok(()) + } + + fn mainloop(&mut self) -> anyhow::Result<()> { + info!(self.log, "Starting main loop"); + let rect = self.terminal.get_frame().size(); + // Size the rack for the initial draw + self.state.rack_state.resize(rect.width, rect.height, &MARGIN); + + // Draw the initial screen + let screen = self.screens.get_mut(self.active_screen); + screen.resize(&mut self.state, rect.width, rect.height); + screen.draw(&self.state, &mut self.terminal)?; + + loop { + let screen = self.screens.get_mut(self.active_screen); + // unwrap is safe because we always hold onto a Sender + let event = self.events_rx.recv().unwrap(); + match event { + Event::Tick => { + let actions = screen.on(&mut self.state, ScreenEvent::Tick); + self.handle_actions(actions)?; + } + Event::Term(TermEvent::Key(key_event)) => { + if is_control_c(&key_event) { + info!(self.log, "CTRL-C Pressed. Exiting."); + break; + } + let actions = screen.on( + &mut self.state, + ScreenEvent::Term(TermEvent::Key(key_event)), + ); + self.handle_actions(actions)?; + } + Event::Term(TermEvent::Resize(width, height)) => { + self.state.rack_state.resize(width, height, &MARGIN); + screen.resize(&mut self.state, width, height); + screen.draw(&self.state, &mut self.terminal)?; + } + Event::Term(TermEvent::Mouse(mouse_event)) => { + self.state.mouse = + Point { x: mouse_event.column, y: mouse_event.row }; + let actions = screen.on( + &mut self.state, + ScreenEvent::Term(TermEvent::Mouse(mouse_event)), + ); + self.handle_actions(actions)?; + } + Event::Inventory(inventory) => { + if let Err(e) = + self.state.inventory.update_inventory(inventory) + { + error!(self.log, "Failed to update inventory: {e}",); + } else { + // Inventory changed. Redraw the screen. 
+ screen.draw(&self.state, &mut self.terminal)?; + } + } + _ => info!(self.log, "{:?}", event), + } + } + Ok(()) + } + + fn handle_actions(&mut self, actions: Vec) -> anyhow::Result<()> { + for action in actions { + match action { + Action::Redraw => { + let screen = self.screens.get_mut(self.active_screen); + screen.draw(&self.state, &mut self.terminal)?; + } + Action::SwitchScreen(id) => { + self.active_screen = id; + let screen = self.screens.get_mut(id); + let rect = self.terminal.get_frame().size(); + + screen.resize(&mut self.state, rect.width, rect.height); + + // Simulate a mouse movement for the current position + // because the mouse may be in a different position when transitioning + // between screens. + let mouse_event = MouseEvent { + kind: MouseEventKind::Moved, + column: self.state.mouse.x, + row: self.state.mouse.y, + modifiers: KeyModifiers::NONE, + }; + let event = + ScreenEvent::Term(TermEvent::Mouse(mouse_event)); + // We ignore actions, as they can only be draw actions, and + // we are about to draw. + let _ = screen.on(&mut self.state, event); + screen.draw(&self.state, &mut self.terminal)?; + } + } + } + Ok(()) + } + + fn start_tokio_runtime(&mut self) { + let events_tx = self.events_tx.clone(); + let log = self.log.clone(); + let wicketd_manager = self.wicketd_manager.take().unwrap(); + self.tokio_rt.block_on(async { + run_event_listener(log.clone(), events_tx).await; + tokio::spawn(async move { + wicketd_manager.run().await; + }); + }); + } +} + +fn is_control_c(key_event: &KeyEvent) -> bool { + key_event.code == KeyCode::Char('c') + && key_event.modifiers == KeyModifiers::CONTROL +} + +/// Listen for terminal related events +async fn run_event_listener(log: slog::Logger, events_tx: Sender) { + info!(log, "Starting event listener"); + tokio::spawn(async move { + let mut events = EventStream::new(); + let mut ticker = interval(Duration::from_millis(30)); + loop { + tokio::select! { + _ = ticker.tick() => { + if events_tx.send(Event::Tick).is_err() { + info!(log, "Event listener completed"); + // The receiver was dropped. Program is ending. + return; + } + } + event = events.next() => { + let event = match event { + None => { + error!(log, "Event stream completed. Shutting down."); + return; + } + Some(Ok(event)) => event, + Some(Err(e)) => { + // TODO: Issue a shutdown + error!(log, "Failed to receive event: {:?}", e); + return; + } + }; + if events_tx.send(Event::Term(event)).is_err() { + info!(log, "Event listener completed"); + // The receiver was dropped. Program is ending. + return; + } + + } + } + } + }); +} + +#[derive(Debug, Clone, Copy, Default)] +pub struct Point { + pub x: u16, + pub y: u16, +} + +/// The data state of the Wizard +/// +/// Data is not tied to any specific screen and is updated upon event receipt. +#[derive(Debug)] +pub struct State { + pub inventory: Inventory, + pub rack_state: RackState, + pub mouse: Point, +} + +impl Default for State { + fn default() -> Self { + Self::new() + } +} + +impl State { + pub fn new() -> State { + State { + inventory: Inventory::default(), + rack_state: RackState::new(), + mouse: Point::default(), + } + } +} + +/// Send requests to RSS +/// +/// Replies come in as [`Event`]s +pub struct RssManager {} + +/// An event that will update state in the wizard +/// +/// This can be a keypress, mouse event, or response from a downstream service. 
+#[derive(Debug)] +pub enum Event { + /// An input event from the terminal + Term(TermEvent), + + /// An Inventory Update Event + Inventory(RackV1Inventory), + + /// The tick of a Timer + /// This can be used to draw a frame to the terminal + Tick, + //... TODO: Replies from MGS & RSS +} + +/// An action for the system to take. +/// +/// This can be something like a screen transition or calling a downstream +/// service. Screens never take actions directly, but they are the only ones +/// that know what visual content an input such as a key press or mouse event +/// is meant for and what action should be taken in that case. +pub enum Action { + Redraw, + SwitchScreen(ScreenId), +} + +/// Events sent to a screen +/// +/// These are a subset of [`Event`] +pub enum ScreenEvent { + /// An input event from the terminal + Term(crossterm::event::Event), + + /// The tick of a timer + Tick, +} From 516acbc673392d9b213101e5c29a360d68ebd885 Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 13 Jan 2023 17:18:30 -0800 Subject: [PATCH 67/80] [wicket] fix call to get_prev_component_id (#2167) Previously, we were showing the next component ID in both the previous and next spots. --- wicket/src/screens/component.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wicket/src/screens/component.rs b/wicket/src/screens/component.rs index c57604e5ce8..3e703cee830 100644 --- a/wicket/src/screens/component.rs +++ b/wicket/src/screens/component.rs @@ -103,7 +103,7 @@ impl ComponentScreen { // TODO: Some sliding style animation? let title = Spans::from(vec![ Span::styled( - state.rack_state.get_next_component_id().name(), + state.rack_state.get_prev_component_id().name(), menu_bar_style, ), Span::raw(" "), From 338cbd07ba1854ef7aefb42ca332f36b778ee9bf Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 13 Jan 2023 23:36:55 -0800 Subject: [PATCH 68/80] [wicket] add an upload command (#2165) Add a command that can upload data to wicketd, via stdin from wicket. This also makes wicket behave like a login shell: read arguments passed in with `-c`. 
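
For a purely local check, the same code path can be exercised without
ssh, e.g. `wicket -c "upload foo 0.1.0 --no-upload" < foo.bin` (roughly
what the new integration test in `command_tests.rs` drives via stdin).
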
Tested by creating a user locally with wicket as the login shell, then running: ``` % cat foo.bin | ssh wicket-test@localhost upload my-artifact 0.1.0 Jan 13 22:12:25.946 INFO read artifact my-artifact:0.1.0 (11957848 bytes) from stdin, uploading to wicketd Jan 13 22:12:25.972 INFO successfully uploaded my-artifact:0.1.0 (11957848 bytes) to wicketd ``` --- Cargo.lock | 40 +++++- Cargo.toml | 2 + wicket/Cargo.toml | 4 + wicket/README.md | 18 +++ wicket/src/bin/wicket.rs | 13 +- wicket/src/dispatch.rs | 123 ++++++++++++++++++ wicket/src/lib.rs | 3 + wicket/src/upload.rs | 96 ++++++++++++++ wicket/src/wicketd.rs | 33 ++--- wicket/src/wizard.rs | 21 +-- .../tests/integration_tests/command_tests.rs | 32 +++++ wicket/tests/integration_tests/mod.rs | 5 + wicket/tests/mod.rs | 17 +++ 13 files changed, 363 insertions(+), 44 deletions(-) create mode 100644 wicket/src/dispatch.rs create mode 100644 wicket/src/upload.rs create mode 100644 wicket/tests/integration_tests/command_tests.rs create mode 100644 wicket/tests/integration_tests/mod.rs create mode 100644 wicket/tests/mod.rs diff --git a/Cargo.lock b/Cargo.lock index ca2ef94c8ac..a2b0e899973 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,6 +142,20 @@ dependencies = [ "term", ] +[[package]] +name = "assert_cmd" +version = "2.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9834fcc22e0874394a010230586367d4a3e9f11b560f469262678547e1d2575e" +dependencies = [ + "bstr 1.1.0", + "doc-comment", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "assert_matches" version = "1.5.0" @@ -503,6 +517,18 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "bstr" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b45ea9b00a7b3f2988e9a65ad3917e62123c38dba709b666506207be96d1790b" +dependencies = [ + "memchr", + "once_cell", + "regex-automata", + "serde", +] + [[package]] name = "buf-list" version = "0.1.3" @@ -2133,7 +2159,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ "aho-corasick", - "bstr", + "bstr 0.2.17", "fnv", "log", "regex", @@ -2357,7 +2383,7 @@ version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f25cfb6def593d43fae1ead24861f217e93bc70768a45cc149a69b5f049df4" dependencies = [ - "bstr", + "bstr 0.2.17", "bytes", "crossbeam-channel", "form_urlencoded", @@ -5598,6 +5624,12 @@ dependencies = [ "keccak", ] +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + [[package]] name = "shlex" version = "1.1.0" @@ -7104,6 +7136,8 @@ name = "wicket" version = "0.1.0" dependencies = [ "anyhow", + "assert_cmd", + "camino", "clap 4.0.32", "crossterm", "futures", @@ -7113,8 +7147,10 @@ dependencies = [ "serde", "serde_json", "sha3", + "shell-words", "slog", "slog-async", + "slog-envlogger", "slog-term", "snafu", "tar", diff --git a/Cargo.toml b/Cargo.toml index 518a39502c7..98882ce81a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,6 +88,7 @@ resolver = "2" anyhow = "1.0" api_identity = { path = "api_identity" } assert_matches = "1.5.0" +assert_cmd = "2.0.8" async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "7944dafc8a36dc6e20a1405eca59d04662de2bb7" } async-trait = "0.1.60" authz-macros = { 
path = "nexus/authz-macros" } @@ -205,6 +206,7 @@ serde_urlencoded = "0.7.1" serde_with = "2.2.0" serial_test = "0.10" sha3 = "0.10.6" +shell-words = "1.1.0" signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } sled = "0.34" diff --git a/wicket/Cargo.toml b/wicket/Cargo.toml index 0f314569415..c344ac83abf 100644 --- a/wicket/Cargo.toml +++ b/wicket/Cargo.toml @@ -8,6 +8,7 @@ default-run = "wicket" [dependencies] anyhow.workspace = true +camino.workspace = true clap.workspace = true crossterm = { version = "0.25.0", features = ["event-stream"] } futures.workspace = true @@ -17,8 +18,10 @@ semver = { version = "1.0.16", features = ["std", "serde"] } serde.workspace = true serde_json.workspace = true sha3.workspace = true +shell-words.workspace = true slog.workspace = true slog-async.workspace = true +slog-envlogger.workspace = true slog-term.workspace = true snafu.workspace = true tar.workspace = true @@ -29,6 +32,7 @@ tui = "0.19.0" wicketd-client.workspace = true [dev-dependencies] +assert_cmd.workspace = true tempfile.workspace = true [[bin]] diff --git a/wicket/README.md b/wicket/README.md index d9ffb8019e8..667cd8bd88d 100644 --- a/wicket/README.md +++ b/wicket/README.md @@ -122,3 +122,21 @@ functionality implemented. All the inventory and power data shown in the and RSS. Lastly, we don't have a way to take rack updates and install them, or initialize the rack (including trust quorum). This is a lot of functionality that will be implemented incrementally. + +# Testing wicket as a login shell + +Wicket is meant to be used as a login shell. To test the login shell on a local Unix machine: + +1. Make the `wicket` available globally, at e.g. `/usr/local/bin/wicket`: + * If your build directory is globally readable, create a symlink to `wicket` in a well-known location. From omicron's root, run: `sudo ln -s $(readlink -f target/debug/wicket) /usr/local/bin/wicket` + * If it isn't globally accessible, run `sudo cp target/debug/wicket /usr/local/bin`. (You'll have to copy `wicket` each time you build it.) +2. Add a new user to test against, for example `wicket-test`: + 1. Add a group for the new user: `groupadd wicket-test`. + 2. Add the user: `sudo useradd -m -g wicket-test -s /usr/local/bin/wicket wicket-test` + +At this point, you can use `sudo -u wicket-test -i` (Linux) or `pfexec su - wicket-test` (illumos) to test wicket as a login shell. + +* A plain `sudo -u wicket-test -i` will show the TUI. +* `sudo -u wicket-test -i upload ...` will let you upload an artifact over stdin. + +If you'd like to test connections over ssh, add your ssh key to the new user's `.ssh/authorized_keys`, then run `ssh wicket-test@localhost [upload ...]`. diff --git a/wicket/src/bin/wicket.rs b/wicket/src/bin/wicket.rs index 5e536f24e9d..fe7877ecb09 100644 --- a/wicket/src/bin/wicket.rs +++ b/wicket/src/bin/wicket.rs @@ -2,12 +2,11 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use std::error::Error;
-use wicket::Wizard;
+use anyhow::Result;
+use clap::Parser;
+use wicket::WicketApp;
 
-fn main() -> Result<(), Box<dyn Error>> {
-    let mut wizard = Wizard::new();
-    wizard.run()?;
-
-    Ok(())
+fn main() -> Result<()> {
+    let app: WicketApp = Parser::parse();
+    app.exec()
 }
diff --git a/wicket/src/dispatch.rs b/wicket/src/dispatch.rs
new file mode 100644
index 00000000000..e9ad4d395bd
--- /dev/null
+++ b/wicket/src/dispatch.rs
@@ -0,0 +1,123 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Code that manages command dispatch for wicket.
+
+use std::net::SocketAddrV6;
+
+use anyhow::{bail, Context, Result};
+use camino::{Utf8Path, Utf8PathBuf};
+use clap::Parser;
+use slog::Drain;
+
+use crate::{upload::UploadArgs, wizard::Wizard};
+
+#[derive(Debug, Parser)]
+#[command(version, author = "Oxide Computer Company")]
+pub struct WicketApp {
+    /// Login shell arguments.
+    ///
+    /// Wicket is designed to be a login shell for use over ssh. If no arguments are specified,
+    /// wicket behaves like a TUI. However, if arguments are specified with "-c" (as in other login
+    /// shells, e.g. bash -c), wicket accepts an upload command.
+    ///
+    /// Login shell arguments are provided in a quoted form, so we expect a single String here.
+    /// This string is split using shell quoting logic to get the actual arguments.
+    #[arg(short = 'c', allow_hyphen_values = true)]
+    shell_args: Option<String>,
+}
+
+#[derive(Debug, Parser)]
+enum ShellCommand {
+    /// Upload an artifact to wicketd.
+    Upload(UploadArgs),
+}
+
+impl WicketApp {
+    /// Executes the command.
+    pub fn exec(self) -> Result<()> {
+        // TODO: make this configurable?
+        let wicketd_addr: SocketAddrV6 = "[::1]:8000".parse().unwrap();
+
+        match self.shell_args {
+            Some(shell_args) => {
+                let args =
+                    shell_words::split(&shell_args).with_context(|| {
+                        format!("could not parse shell arguments from input {shell_args}")
+                    })?;
+                let log = setup_log(&log_path()?, WithStderr::Yes)?;
+                // parse_from uses the first argument as the command name;
+                // insert "wicket" as the command name.
+                let args = ShellCommand::parse_from(
+                    std::iter::once("wicket".to_owned()).chain(args),
+                );
+                match args {
+                    ShellCommand::Upload(args) => args.exec(log, wicketd_addr),
+                }
+            }
+            None => {
+                // Do not expose standard error since it'll be on top of the TUI.
+                let log = setup_log(&log_path()?, WithStderr::No)?;
+                // Not invoked with "-c" -- run the TUI wizard.
+ Wizard::new(log, wicketd_addr).run() + } + } + } +} + +fn setup_log( + path: &Utf8Path, + with_stderr: WithStderr, +) -> anyhow::Result { + let file = std::fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path) + .with_context(|| format!("error opening log file {path}"))?; + + let decorator = slog_term::PlainDecorator::new(file); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + + let drain = match with_stderr { + WithStderr::Yes => { + let stderr_drain = stderr_env_drain("RUST_LOG"); + let drain = slog::Duplicate::new(drain, stderr_drain).fuse(); + slog_async::Async::new(drain).build().fuse() + } + WithStderr::No => slog_async::Async::new(drain).build().fuse(), + }; + + Ok(slog::Logger::root(drain, slog::o!())) +} + +#[derive(Copy, Clone, Debug)] +enum WithStderr { + Yes, + No, +} + +fn log_path() -> Result { + match std::env::var("WICKET_LOG_PATH") { + Ok(path) => Ok(path.into()), + Err(std::env::VarError::NotPresent) => Ok("/tmp/wicket.log".into()), + Err(std::env::VarError::NotUnicode(_)) => { + bail!("WICKET_LOG_PATH is not valid unicode"); + } + } +} + +fn stderr_env_drain(env_var: &str) -> impl Drain { + let stderr_decorator = slog_term::TermDecorator::new().build(); + let stderr_drain = + slog_term::FullFormat::new(stderr_decorator).build().fuse(); + let mut builder = slog_envlogger::LogBuilder::new(stderr_drain); + if let Ok(s) = std::env::var(env_var) { + builder = builder.parse(&s); + } else { + // Log at the info level by default. + builder = builder.filter(None, slog::FilterLevel::Info); + } + builder.build() +} diff --git a/wicket/src/lib.rs b/wicket/src/lib.rs index 2da59d13367..f3ccea9c7c4 100644 --- a/wicket/src/lib.rs +++ b/wicket/src/lib.rs @@ -10,11 +10,14 @@ //! in an intuitive manner. pub(crate) mod defaults; +mod dispatch; pub(crate) mod inventory; mod screens; pub mod update; +mod upload; mod wicketd; mod widgets; mod wizard; +pub use crate::dispatch::*; pub use crate::wizard::*; diff --git a/wicket/src/upload.rs b/wicket/src/upload.rs new file mode 100644 index 00000000000..9114b8a9140 --- /dev/null +++ b/wicket/src/upload.rs @@ -0,0 +1,96 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Support for uploading artifacts to wicketd. + +use std::net::SocketAddrV6; + +use anyhow::{Context, Result}; +use clap::Args; +use tokio::io::AsyncReadExt; + +use crate::wicketd::create_wicketd_client; + +#[derive(Debug, Args)] +pub(crate) struct UploadArgs { + /// Artifact name to upload + name: String, + + /// Artifact version to upload + version: String, + + /// Do not perform the upload to wicketd. + #[clap(long)] + no_upload: bool, +} + +impl UploadArgs { + pub(crate) fn exec( + self, + log: slog::Logger, + wicketd_addr: SocketAddrV6, + ) -> Result<()> { + let runtime = + tokio::runtime::Runtime::new().context("creating tokio runtime")?; + runtime.block_on(self.do_upload(log, wicketd_addr)) + } + + async fn do_upload( + &self, + log: slog::Logger, + wicketd_addr: SocketAddrV6, + ) -> Result<()> { + // Read the entire artifact from stdin into memory. 
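+        // The artifact is buffered in full before any bytes go out on the
+        // wire; `put_artifact` below takes the whole buffer as a single
+        // request body.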
+ let mut artifact_bytes = Vec::new(); + tokio::io::stdin() + .read_to_end(&mut artifact_bytes) + .await + .with_context(|| { + format!( + "error reading artifact {}:{} from stdin", + self.name, self.version + ) + })?; + + let artifact_bytes_len = artifact_bytes.len(); + + slog::info!( + log, + "read artifact {}:{} ({artifact_bytes_len} bytes) from stdin", + self.name, + self.version, + ); + + // TODO: perform validation on the artifact + + if self.no_upload { + slog::info!( + log, + "not uploading artifact to wicketd (--no-upload passed in)" + ); + } else { + slog::info!(log, "uploading artifact to wicketd"); + let wicketd_client = create_wicketd_client(&log, wicketd_addr); + + wicketd_client + .put_artifact(&self.name, &self.version, artifact_bytes) + .await + .with_context(|| { + format!( + "error uploading artifact {}:{} to wicketd", + self.name, self.version, + ) + })?; + + slog::info!( + log, + "successfully uploaded {}:{} ({artifact_bytes_len} bytes) to wicketd", + self.name, + self.version, + ); + } + + Ok(()) + } +} diff --git a/wicket/src/wicketd.rs b/wicket/src/wicketd.rs index 51db0ab741f..2b8630e7d8a 100644 --- a/wicket/src/wicketd.rs +++ b/wicket/src/wicketd.rs @@ -50,22 +50,7 @@ impl WicketdManager { ) -> (WicketdHandle, WicketdManager) { let log = log.new(o!("component" => "WicketdManager")); let (tx, rx) = tokio::sync::mpsc::channel(CHANNEL_CAPACITY); - let endpoint = - format!("http://[{}]:{}", wicketd_addr.ip(), wicketd_addr.port()); - - let timeout = - std::time::Duration::from_millis(WICKETD_TIMEOUT_MS.into()); - let client = reqwest::ClientBuilder::new() - .connect_timeout(timeout) - .timeout(timeout) - .build() - .unwrap(); - - let inventory_client = wicketd_client::Client::new_with_client( - &endpoint, - client, - log.clone(), - ); + let inventory_client = create_wicketd_client(&log, wicketd_addr); let inventory = RackV1Inventory { sps: vec![] }; let handle = WicketdHandle { tx }; let manager = @@ -95,6 +80,22 @@ impl WicketdManager { } } +pub(crate) fn create_wicketd_client( + log: &Logger, + wicketd_addr: SocketAddrV6, +) -> wicketd_client::Client { + let endpoint = + format!("http://[{}]:{}", wicketd_addr.ip(), wicketd_addr.port()); + let timeout = std::time::Duration::from_millis(WICKETD_TIMEOUT_MS.into()); + let client = reqwest::ClientBuilder::new() + .connect_timeout(timeout) + .timeout(timeout) + .build() + .unwrap(); + + wicketd_client::Client::new_with_client(&endpoint, client, log.clone()) +} + async fn poll_inventory( log: &Logger, client: wicketd_client::Client, diff --git a/wicket/src/wizard.rs b/wicket/src/wizard.rs index 91025c08ac7..dec8fe1eddb 100644 --- a/wicket/src/wizard.rs +++ b/wicket/src/wizard.rs @@ -13,7 +13,7 @@ use crossterm::terminal::{ LeaveAlternateScreen, }; use futures::StreamExt; -use slog::{error, info, Drain}; +use slog::{error, info}; use std::io::{stdout, Stdout}; use std::net::SocketAddrV6; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -91,10 +91,7 @@ pub struct Wizard { #[allow(clippy::new_without_default)] impl Wizard { - pub fn new() -> Wizard { - // TODO: make this configurable? 
- let wicketd_addr: SocketAddrV6 = "[::1]:8000".parse().unwrap(); - let log = Self::setup_log("/tmp/wicket.log").unwrap(); + pub fn new(log: slog::Logger, wicketd_addr: SocketAddrV6) -> Wizard { let screens = Screens::new(&log); let (events_tx, events_rx) = channel(); let state = State::new(); @@ -120,20 +117,6 @@ impl Wizard { } } - pub fn setup_log(path: &str) -> anyhow::Result { - let file = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(path)?; - - let decorator = slog_term::PlainDecorator::new(file); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - Ok(slog::Logger::root(drain, slog::o!())) - } - pub fn run(&mut self) -> anyhow::Result<()> { self.start_tokio_runtime(); enable_raw_mode()?; diff --git a/wicket/tests/integration_tests/command_tests.rs b/wicket/tests/integration_tests/command_tests.rs new file mode 100644 index 00000000000..3535e95f1ed --- /dev/null +++ b/wicket/tests/integration_tests/command_tests.rs @@ -0,0 +1,32 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::path::Path; + +use assert_cmd::Command; + +#[test] +fn test_wicket_shell_like() { + let tempdir = tempfile::tempdir().unwrap(); + + let mut cmd = make_cmd(tempdir.path()); + cmd.args(["-c", "help"]); + cmd.assert().success(); + + let mut cmd = make_cmd(tempdir.path()); + cmd.args(["-c", "--help"]); + cmd.assert().success(); + + let mut cmd = make_cmd(tempdir.path()); + cmd.args(["-c", "upload foo 0.1.0 --no-upload"]).write_stdin("upload-test"); + cmd.assert().success(); +} + +fn make_cmd(tempdir: &Path) -> Command { + let mut cmd = Command::cargo_bin("wicket").unwrap(); + // Set the log path to the temp dir, because the default is to log to + // /tmp/wicket.log (which might be owned by a different user). + cmd.env("WICKET_LOG_PATH", tempdir.join("wicket.log")); + cmd +} diff --git a/wicket/tests/integration_tests/mod.rs b/wicket/tests/integration_tests/mod.rs new file mode 100644 index 00000000000..fdf11b8cd75 --- /dev/null +++ b/wicket/tests/integration_tests/mod.rs @@ -0,0 +1,5 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +mod command_tests; diff --git a/wicket/tests/mod.rs b/wicket/tests/mod.rs new file mode 100644 index 00000000000..42343f04f9e --- /dev/null +++ b/wicket/tests/mod.rs @@ -0,0 +1,17 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Integration tests for the wicket client. +//! +//! Why use this weird layer of indirection, you might ask? Cargo chooses to +//! compile *each file* within the "tests/" subdirectory as a separate crate. +//! This means that doing "file-granularity" conditional compilation is +//! difficult, since a file like "test_for_illumos_only.rs" would get compiled +//! and tested regardless of the contents of "mod.rs". +//! +//! However, by lumping all tests into a submodule, all integration tests are +//! joined into a single crate, which itself can filter individual files +//! by (for example) choice of target OS. 
+ +mod integration_tests; From 5935d13cd70e24f713eacca8c0f7c23f4716f54f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 09:20:41 -0800 Subject: [PATCH 69/80] Bump trybuild from 1.0.75 to 1.0.76 (#2172) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [trybuild](https://github.com/dtolnay/trybuild) from 1.0.75 to 1.0.76.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a2b0e899973..36912caa6a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6665,9 +6665,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "trybuild" -version = "1.0.75" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1212c215a87a183687a7cc7065901b1a98da6b37277d51a1b5faedbb4efd4f3" +checksum = "6ed2c57956f91546d4d33614265a85d55c8e1ab91484853a10335894786d7db6" dependencies = [ "glob", "once_cell", diff --git a/Cargo.toml b/Cargo.toml index 98882ce81a5..d49771c6a00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -247,7 +247,7 @@ trust-dns-client = "0.22" trust-dns-proto = "0.22" trust-dns-resolver = "0.22" trust-dns-server = "0.22" -trybuild = "1.0.75" +trybuild = "1.0.76" uuid = { version = "1.2.2", features = ["serde", "v4"] } usdt = "0.3" vsss-rs = { version = "2.0.0", default-features = false, features = ["std"] } From a02eca730170c78471984cd77128c139d48a62f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 10:19:24 -0800 Subject: [PATCH 70/80] Bump clap from 4.0.32 to 4.1.1 (#2168) Bumps [clap](https://github.com/clap-rs/clap) from 4.0.32 to 4.1.1.
Changelog

Sourced from clap's changelog.

[4.1.1] - 2023-01-14

Fixes

- (error) Small softening attempt for "unexpected argument" error

[4.1.0] - 2023-01-13

Compatibility

MSRV changed to 1.64.0

For apps with custom --help and --version flags:

- Descriptions for --help and --version changed

When apps have errors imitating clap's error style:

- Error message style was changed, including:
  - Moving away from "did you mean" to tips
  - Leading letter is lower case
  - "For more" added some punctuation

Features

- ArgMatches::get_occurrences support for argument values to be grouped by their occurrence

Fixes

- (derive) Allow upgrade_from when arguments / subcommands are explicitly marked as required
- (help) Try to be clearer and more succinct with --help and --version (also helps with overflow)
- (error) Try to be clearer and more succinct with error messages
- (error) Officially adopt an error style guide
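
Of these, the newly stabilized `ArgMatches::get_occurrences` is the only
new API surface. A minimal standalone sketch of what it enables
(illustrative code against clap 4.1, not from this repository):

```rust
use clap::{Arg, ArgAction, Command};

fn main() {
    // Two occurrences of `--point`, each carrying two values.
    let matches = Command::new("demo")
        .arg(
            Arg::new("point")
                .long("point")
                .num_args(2)
                .action(ArgAction::Append),
        )
        .get_matches_from(["demo", "--point", "1", "2", "--point", "3", "4"]);

    // get_occurrences groups values by occurrence instead of flattening
    // them: [["1", "2"], ["3", "4"]] rather than ["1", "2", "3", "4"].
    let grouped: Vec<Vec<&String>> = matches
        .get_occurrences::<String>("point")
        .expect("--point was provided")
        .map(Iterator::collect)
        .collect();
    assert_eq!(grouped, vec![vec!["1", "2"], vec!["3", "4"]]);
}
```
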
Commits

- 74a82d7 chore: Release
- 06f392a docs: Update changelog
- 4d913fa Merge pull request #4639 from epage/error
- 162a556 fix(error): Try to soften unexpected argument/value errors
- 34d856b chore: Release
- 889ca7a chore: Bump versions for 4.1
- 2bafb9b docs(contrib): Define a compatibility policy for help/error output
- a41ca2e docs: Update changelog
- 523adc2 Merge pull request #4635 from epage/stablize
- b4f111a feat: Stablize ArgMatches::get_occurrences
- Additional commits viewable in the compare view
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Adam H. Leventhal --- Cargo.lock | 42 +++++++++---------- Cargo.toml | 2 +- nexus/tests/output/cmd-nexus-noargs-stderr | 4 +- .../tests/output/cmd-oximeter-noargs-stderr | 2 +- .../tests/output/cmd-sled-agent-noargs-stderr | 4 +- .../output/cmd-sled-agent-sim-noargs-stderr | 4 +- .../output/cmd-omicron-dev-bad-cmd-stderr | 4 +- .../cmd-omicron-dev-db-populate-noargs-stderr | 4 +- .../cmd-omicron-dev-db-wipe-noargs-stderr | 4 +- .../output/cmd-omicron-dev-noargs-stderr | 4 +- 10 files changed, 37 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36912caa6a6..87ae1e6c98b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -765,12 +765,12 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.32" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39" +checksum = "4ec7a4128863c188deefe750ac1d1dfe66c236909f845af04beed823638dc1b2" dependencies = [ "bitflags", - "clap_derive 4.0.21", + "clap_derive 4.1.0", "clap_lex 0.3.0", "is-terminal", "once_cell", @@ -793,9 +793,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.0.21" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" +checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8" dependencies = [ "heck", "proc-macro-error", @@ -2027,7 +2027,7 @@ name = "gateway-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.0.32", + "clap 4.1.1", "futures", "gateway-client", "libc", @@ -2586,7 +2586,7 @@ dependencies = [ "buf-list", "bytes", "camino", - "clap 4.0.32", + "clap 4.1.1", "ddm-admin-client", "display-error-chain", "futures", @@ -2624,7 +2624,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.0.32", + "clap 4.1.1", "dropshot", "expectorate", "hyper", @@ -2654,7 +2654,7 @@ name = "internal-dns" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.0.32", + "clap 4.1.1", "dropshot", "expectorate", "internal-dns-client", @@ -3441,7 +3441,7 @@ name = "omicron-deploy" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.0.32", + "clap 4.1.1", "crossbeam", "omicron-package", "omicron-sled-agent", @@ -3458,7 +3458,7 @@ name = "omicron-gateway" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.0.32", + "clap 4.1.1", "dropshot", "expectorate", "futures", @@ -3500,7 +3500,7 @@ dependencies = [ "base64 0.20.0", "bb8", "chrono", - "clap 4.0.32", + "clap 4.1.1", "cookie", "criterion", "crucible-agent-client", @@ -3586,7 +3586,7 @@ name = "omicron-package" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.0.32", + "clap 4.1.1", "futures", "hex", "indicatif", @@ -3624,7 +3624,7 @@ dependencies = [ "bytes", "cfg-if 1.0.0", "chrono", - "clap 4.0.32", + "clap 4.1.1", "crucible-agent-client", "crucible-client-types", "ddm-admin-client", @@ -3684,7 +3684,7 @@ name = "omicron-test-utils" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.0.32", + "clap 4.1.1", "dropshot", "expectorate", "futures", @@ -3944,7 +3944,7 @@ dependencies = [ name = "oximeter-collector" version = "0.1.0" dependencies = [ - "clap 4.0.32", + "clap 4.1.1", "dropshot", "expectorate", "internal-dns-client", @@ -3975,7 +3975,7 @@ dependencies = [ "async-trait", "bytes", "chrono", - "clap 4.0.32", + "clap 4.1.1", "dropshot", "itertools", 
"omicron-test-utils", @@ -4564,7 +4564,7 @@ version = "0.2.1-dev" source = "git+https://github.com/oxidecomputer/progenitor?branch=main#634bf98b053cc494191365e2072b3580f4b03df6" dependencies = [ "anyhow", - "clap 4.0.32", + "clap 4.1.1", "openapiv3", "progenitor-client", "progenitor-impl", @@ -5911,7 +5911,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.0.32", + "clap 4.1.1", "dropshot", "futures", "gateway-messages", @@ -7138,7 +7138,7 @@ dependencies = [ "anyhow", "assert_cmd", "camino", - "clap 4.0.32", + "clap 4.1.1", "crossterm", "futures", "hex", @@ -7168,7 +7168,7 @@ dependencies = [ "async-trait", "buf-list", "bytes", - "clap 4.0.32", + "clap 4.1.1", "debug-ignore", "dropshot", "expectorate", diff --git a/Cargo.toml b/Cargo.toml index d49771c6a00..928944e1aa8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,7 @@ bytes = "1.3.0" camino = "1.1" cfg-if = "1.0" chrono = { version = "0.4", features = [ "serde" ] } -clap = { version = "4.0", features = ["derive"] } +clap = { version = "4.1", features = ["derive"] } cookie = "0.16" criterion = { version = "0.4", features = [ "async_tokio" ] } crossbeam = "0.8" diff --git a/nexus/tests/output/cmd-nexus-noargs-stderr b/nexus/tests/output/cmd-nexus-noargs-stderr index ac203ced652..f371553325d 100644 --- a/nexus/tests/output/cmd-nexus-noargs-stderr +++ b/nexus/tests/output/cmd-nexus-noargs-stderr @@ -1,6 +1,6 @@ -error: The following required arguments were not provided: +error: the following required arguments were not provided: Usage: nexus -For more information try '--help' +For more information, try '--help'. diff --git a/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr b/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr index d2609b1ec05..7b736fe8a12 100644 --- a/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr +++ b/oximeter/collector/tests/output/cmd-oximeter-noargs-stderr @@ -8,4 +8,4 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help information + -h, --help Print help diff --git a/sled-agent/tests/output/cmd-sled-agent-noargs-stderr b/sled-agent/tests/output/cmd-sled-agent-noargs-stderr index 48fb7910c3d..ee397c0ef7c 100644 --- a/sled-agent/tests/output/cmd-sled-agent-noargs-stderr +++ b/sled-agent/tests/output/cmd-sled-agent-noargs-stderr @@ -8,5 +8,5 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help information - -V, --version Print version information + -h, --help Print help + -V, --version Print version diff --git a/sled-agent/tests/output/cmd-sled-agent-sim-noargs-stderr b/sled-agent/tests/output/cmd-sled-agent-sim-noargs-stderr index 3d9c4ccce1f..9aee5c4e5ad 100644 --- a/sled-agent/tests/output/cmd-sled-agent-sim-noargs-stderr +++ b/sled-agent/tests/output/cmd-sled-agent-sim-noargs-stderr @@ -1,8 +1,8 @@ -error: The following required arguments were not provided: +error: the following required arguments were not provided: Usage: sled-agent-sim -For more information try '--help' +For more information, try '--help'. 
diff --git a/test-utils/tests/output/cmd-omicron-dev-bad-cmd-stderr b/test-utils/tests/output/cmd-omicron-dev-bad-cmd-stderr index d92b0477463..b18526db3aa 100644 --- a/test-utils/tests/output/cmd-omicron-dev-bad-cmd-stderr +++ b/test-utils/tests/output/cmd-omicron-dev-bad-cmd-stderr @@ -1,5 +1,5 @@ -error: The subcommand 'bogus-command' wasn't recognized +error: unrecognized subcommand 'bogus-command' Usage: omicron-dev -For more information try '--help' +For more information, try '--help'. diff --git a/test-utils/tests/output/cmd-omicron-dev-db-populate-noargs-stderr b/test-utils/tests/output/cmd-omicron-dev-db-populate-noargs-stderr index e049ed5bfcc..f8276da1688 100644 --- a/test-utils/tests/output/cmd-omicron-dev-db-populate-noargs-stderr +++ b/test-utils/tests/output/cmd-omicron-dev-db-populate-noargs-stderr @@ -1,6 +1,6 @@ -error: The following required arguments were not provided: +error: the following required arguments were not provided: --database-url Usage: omicron-dev db-populate --database-url -For more information try '--help' +For more information, try '--help'. diff --git a/test-utils/tests/output/cmd-omicron-dev-db-wipe-noargs-stderr b/test-utils/tests/output/cmd-omicron-dev-db-wipe-noargs-stderr index 8f9d2531a65..6c8af1aa471 100644 --- a/test-utils/tests/output/cmd-omicron-dev-db-wipe-noargs-stderr +++ b/test-utils/tests/output/cmd-omicron-dev-db-wipe-noargs-stderr @@ -1,6 +1,6 @@ -error: The following required arguments were not provided: +error: the following required arguments were not provided: --database-url Usage: omicron-dev db-wipe --database-url -For more information try '--help' +For more information, try '--help'. diff --git a/test-utils/tests/output/cmd-omicron-dev-noargs-stderr b/test-utils/tests/output/cmd-omicron-dev-noargs-stderr index d57c75b30ea..6573c3ff777 100644 --- a/test-utils/tests/output/cmd-omicron-dev-noargs-stderr +++ b/test-utils/tests/output/cmd-omicron-dev-noargs-stderr @@ -10,5 +10,5 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help information - -V, --version Print version information + -h, --help Print help + -V, --version Print version From b1f3879c9e6efeb6fcd314fd3daaa5d450b3c698 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 16:33:27 -0800 Subject: [PATCH 71/80] Bump debug-ignore from 1.0.3 to 1.0.5 (#2173) Bumps [debug-ignore](https://github.com/sunshowers-code/debug-ignore) from 1.0.3 to 1.0.5.
Changelog

Sourced from debug-ignore's changelog.

[1.0.5] - 2023-01-07

Fixed

  • Fixed link to documentation off of main in readme.

[1.0.4] - 2023-01-07

Added

  • Add a From impl for DebugIgnore (#3).

#3: sunshowers-code/debug-ignore#3

Commits

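For context, `DebugIgnore<T>` is a newtype wrapper whose `Debug` impl prints `...` in place of the wrapped value, and the `From` impl added in 1.0.4 lets callers build the wrapper with a plain `.into()`. A small illustrative sketch (the `Config` type is made up for this example):

```rust
use debug_ignore::DebugIgnore;

#[derive(Debug)]
struct Config {
    name: String,
    // Large payload we don't want dumped into logs via `{:?}`.
    blob: DebugIgnore<Vec<u8>>,
}

fn main() {
    let config = Config {
        name: "example".to_string(),
        // The `From<T>` impl added in 1.0.4 makes this `.into()` work.
        blob: vec![0u8; 1024].into(),
    };
    // Prints: Config { name: "example", blob: ... }
    println!("{config:?}");
}
```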
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=debug-ignore&package-manager=cargo&previous-version=1.0.3&new-version=1.0.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87ae1e6c98b..f566d8c9dbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1310,9 +1310,9 @@ dependencies = [ [[package]] name = "debug-ignore" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b48b0b49e2f473c499ddcd133e78f0f2629aaa997ee61adadb2d1753e6af4cf" +checksum = "ffe7ed1d93f4553003e20b629abe9085e1e81b1429520f897f8f8860bc6dfc21" [[package]] name = "der" diff --git a/Cargo.toml b/Cargo.toml index 928944e1aa8..a66238f59a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,7 +111,7 @@ crucible-client-types = { git = "https://github.com/oxidecomputer/crucible", rev display-error-chain = "0.1.1" ddm-admin-client = { path = "ddm-admin-client" } db-macros = { path = "nexus/db-macros" } -debug-ignore = "1.0.3" +debug-ignore = "1.0.5" derive_more = "0.99.17" diesel = { version = "2.0.2" } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", rev = "18748d9f76c94e1f4400fbec0859b3e77a221a8d" } From 64768278ca7f02a959c642c8f4630e5920b49318 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 16:33:40 -0800 Subject: [PATCH 72/80] Bump dropshot from `ce5deee` to `120e168` (#2170) Bumps [dropshot](https://github.com/oxidecomputer/dropshot) from `ce5deee` to `120e168`.
Commits
  • 120e168 Bump usdt dependency to support stable toolchains (#522)
  • 3b1195d Update Rust to v1.66.1 (#559)
  • ebf16d0 Better detection of features necessary for USDT probes (#561)
  • 3ffc643 move some api_description functions into new schema_util (#560)
  • bf64ab7 move extractors and generic schema utilities out of handler.rs (#554)
  • See full diff in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f566d8c9dbc..da943b5d851 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1527,7 +1527,7 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "dropshot" version = "0.8.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#ce5deee415cd1734b07342640fa09af8e7c3a7d0" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#120e168525018dca4c9c3bed47d398c55fdd4d6b" dependencies = [ "async-stream", "async-trait", @@ -1568,7 +1568,7 @@ dependencies = [ [[package]] name = "dropshot_endpoint" version = "0.8.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#ce5deee415cd1734b07342640fa09af8e7c3a7d0" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#120e168525018dca4c9c3bed47d398c55fdd4d6b" dependencies = [ "proc-macro2", "quote", From a10f9306579416482b6a331627ae9b7e0a7b3621 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 18:22:43 -0800 Subject: [PATCH 73/80] Bump progenitor from `634bf98` to `1ef131a` (#2174) Bumps [progenitor](https://github.com/oxidecomputer/progenitor) from `634bf98` to `1ef131a`.
Commits

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
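Note that this bump also moves the workspace from base64 0.20 to 0.21, which replaces the crate's module-level `encode`/`decode` helpers with explicitly chosen engine values; that accounts for the mechanical churn across Nexus in the diff below. A minimal sketch of the 0.21 calling convention:

```rust
use base64::engine::general_purpose::STANDARD;
use base64::Engine;

fn main() {
    // base64 0.21 replaces the old free functions (`base64::encode`,
    // `base64::decode`) with methods on an explicitly named engine
    // (alphabet + padding configuration).
    let encoded = STANDARD.encode(b"hello world");
    assert_eq!(encoded, "aGVsbG8gd29ybGQ=");

    let decoded = STANDARD.decode(&encoded).expect("valid base64 input");
    assert_eq!(decoded, b"hello world".to_vec());
}
```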
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Adam H. Leventhal --- Cargo.lock | 129 ++++++++++++++++++++++-- Cargo.toml | 4 +- end-to-end-tests/src/instance_launch.rs | 9 +- nexus/src/app/instance.rs | 3 +- nexus/src/app/sagas/disk_create.rs | 19 ++-- nexus/src/app/sagas/snapshot_create.rs | 19 ++-- nexus/src/app/silo.rs | 19 ++-- nexus/src/authn/silos.rs | 31 +++--- nexus/src/external_api/console_api.rs | 10 +- nexus/tests/integration_tests/saml.rs | 48 +++++---- nexus/tests/integration_tests/silos.rs | 36 +++---- nexus/types/src/external_api/params.rs | 43 +++++--- package-manifest.toml | 4 +- 13 files changed, 265 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da943b5d851..78a4925ac1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -884,6 +884,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.3" @@ -1691,7 +1701,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "base64 0.20.0", + "base64 0.21.0", "omicron-sled-agent", "omicron-test-utils", "oxide-client", @@ -2461,6 +2471,19 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.53" @@ -3077,6 +3100,24 @@ dependencies = [ "syn", ] +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nb" version = "0.1.3" @@ -3237,7 +3278,7 @@ version = "0.1.0" dependencies = [ "anyhow", "api_identity", - "base64 0.20.0", + "base64 0.21.0", "chrono", "nexus-passwords", "omicron-common", @@ -3497,7 +3538,7 @@ dependencies = [ "async-bb8-diesel", "async-trait", "authz-macros", - "base64 0.20.0", + "base64 0.21.0", "bb8", "chrono", "clap 4.1.1", @@ -3886,7 +3927,7 @@ dependencies = [ name = "oxide-client" version = "0.1.0" dependencies = [ - "base64 0.20.0", + "base64 0.21.0", "chrono", "futures", "progenitor", @@ -4561,7 +4602,7 @@ dependencies = [ [[package]] name = "progenitor" version = "0.2.1-dev" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#634bf98b053cc494191365e2072b3580f4b03df6" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#1ef131a244cf30b9c4213fdb31674fe071cb0043" dependencies = [ "anyhow", "clap 4.1.1", @@ -4571,12 +4612,13 @@ dependencies = [ "progenitor-macro", "serde", "serde_json", + "serde_yaml", ] [[package]] name = "progenitor-client" version = "0.2.1-dev" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#634bf98b053cc494191365e2072b3580f4b03df6" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#1ef131a244cf30b9c4213fdb31674fe071cb0043" dependencies = [ 
"bytes", "futures-core", @@ -4590,7 +4632,7 @@ dependencies = [ [[package]] name = "progenitor-impl" version = "0.2.1-dev" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#634bf98b053cc494191365e2072b3580f4b03df6" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#1ef131a244cf30b9c4213fdb31674fe071cb0043" dependencies = [ "getopts", "heck", @@ -4612,7 +4654,7 @@ dependencies = [ [[package]] name = "progenitor-macro" version = "0.2.1-dev" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#634bf98b053cc494191365e2072b3580f4b03df6" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#1ef131a244cf30b9c4213fdb31674fe071cb0043" dependencies = [ "openapiv3", "proc-macro2", @@ -4622,15 +4664,16 @@ dependencies = [ "serde", "serde_json", "serde_tokenstream", + "serde_yaml", "syn", ] [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=666ded451b13bba0895485c0b34515c0e59c2c6e#666ded451b13bba0895485c0b34515c0e59c2c6e" +source = "git+https://github.com/oxidecomputer/propolis?rev=92508d573529a1ee50a9422fbca045a5e980a2b5#92508d573529a1ee50a9422fbca045a5e980a2b5" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "crucible-client-types", "progenitor", "propolis_types", @@ -4649,7 +4692,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=666ded451b13bba0895485c0b34515c0e59c2c6e#666ded451b13bba0895485c0b34515c0e59c2c6e" +source = "git+https://github.com/oxidecomputer/propolis?rev=92508d573529a1ee50a9422fbca045a5e980a2b5#92508d573529a1ee50a9422fbca045a5e980a2b5" dependencies = [ "schemars", "serde", @@ -4969,10 +5012,12 @@ dependencies = [ "http-body", "hyper", "hyper-rustls", + "hyper-tls", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -4982,6 +5027,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", + "tokio-native-tls", "tokio-rustls", "tokio-util", "tower-service", @@ -5307,6 +5353,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +dependencies = [ + "windows-sys", +] + [[package]] name = "scheduled-thread-pool" version = "0.2.6" @@ -5365,6 +5420,29 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "0.1.20" @@ -5554,6 +5632,19 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_yaml" +version = "0.9.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b5b431e8907b50339b51223b97d102db8d987ced36f6e4d03621db9316c834" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "serial_test" version = "0.10.0" @@ -6410,6 +6501,16 @@ 
dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-postgres" version = "0.7.7" @@ -6832,6 +6933,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2" + [[package]] name = "untrusted" version = "0.7.1" diff --git a/Cargo.toml b/Cargo.toml index a66238f59a4..c7318985225 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,7 +93,7 @@ async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", async-trait = "0.1.60" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } -base64 = "0.20.0" +base64 = "0.21.0" bb8 = "0.8.0" bcs = "0.1.4" bincode = "1.3.3" @@ -185,7 +185,7 @@ pretty-hex = "0.3.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "666ded451b13bba0895485c0b34515c0e59c2c6e", features = [ "generated-migration" ] } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "92508d573529a1ee50a9422fbca045a5e980a2b5", features = [ "generated-migration" ] } proptest = "1.0.0" quote = "1.0" rand = "0.8.5" diff --git a/end-to-end-tests/src/instance_launch.rs b/end-to-end-tests/src/instance_launch.rs index aa95907f7d0..d347efa42c5 100644 --- a/end-to-end-tests/src/instance_launch.rs +++ b/end-to-end-tests/src/instance_launch.rs @@ -156,8 +156,13 @@ async fn instance_launch() -> Result<()> { .and_then(|line| line.split_whitespace().nth(1)) .context("failed to get SSH host key from serial console")?; eprintln!("host key: ssh-ed25519 {}", host_key); - let host_key = - PublicKey::parse(b"ssh-ed25519", &base64::decode(host_key)?)?; + let host_key = PublicKey::parse( + b"ssh-ed25519", + &base64::Engine::decode( + &base64::engine::general_purpose::STANDARD, + host_key, + )?, + )?; eprintln!("connecting ssh"); let mut session = russh::client::connect( diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index ea4ed2cfe90..f2c8350b953 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -613,7 +613,8 @@ impl super::Nexus { external_ips, firewall_rules, disks: disk_reqs, - cloud_init_bytes: Some(base64::encode( + cloud_init_bytes: Some(base64::Engine::encode( + &base64::engine::general_purpose::STANDARD, db_instance.generate_cidata(&public_keys)?, )), }; diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index ce16eee8bdf..b1c1139c973 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -447,14 +447,17 @@ async fn sdc_regions_ensure( flush_timeout: None, // all downstairs will expect encrypted blocks - key: Some(base64::encode({ - // TODO the current encryption key - // requirement is 32 bytes, what if that - // changes? 
- let mut random_bytes: [u8; 32] = [0; 32]; - rng.fill_bytes(&mut random_bytes); - random_bytes - })), + key: Some(base64::Engine::encode( + &base64::engine::general_purpose::STANDARD, + { + // TODO the current encryption key + // requirement is 32 bytes, what if that + // changes? + let mut random_bytes: [u8; 32] = [0; 32]; + rng.fill_bytes(&mut random_bytes); + random_bytes + }, + )), // TODO TLS, which requires sending X509 stuff during // downstairs region allocation too. diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 65a7cda00ac..50655ec2dbd 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -337,14 +337,17 @@ async fn ssc_regions_ensure( flush_timeout: None, // all downstairs will expect encrypted blocks - key: Some(base64::encode({ - // TODO the current encryption key - // requirement is 32 bytes, what if that - // changes? - let mut random_bytes: [u8; 32] = [0; 32]; - rng.fill_bytes(&mut random_bytes); - random_bytes - })), + key: Some(base64::Engine::encode( + &base64::engine::general_purpose::STANDARD, + { + // TODO the current encryption key + // requirement is 32 bytes, what if that + // changes? + let mut random_bytes: [u8; 32] = [0; 32]; + rng.fill_bytes(&mut random_bytes); + random_bytes + }, + )), // TODO TLS, which requires sending X509 stuff during // downstairs region allocation too. diff --git a/nexus/src/app/silo.rs b/nexus/src/app/silo.rs index d06c4f123e4..e2217ad8c43 100644 --- a/nexus/src/app/silo.rs +++ b/nexus/src/app/silo.rs @@ -746,14 +746,17 @@ impl super::Nexus { } params::IdpMetadataSource::Base64EncodedXml { data } => { - let bytes = - base64::decode(data).map_err(|e| Error::InvalidValue { - label: String::from("data"), - message: format!( - "error getting decoding base64 data: {}", - e - ), - })?; + let bytes = base64::Engine::decode( + &base64::engine::general_purpose::STANDARD, + data, + ) + .map_err(|e| Error::InvalidValue { + label: String::from("data"), + message: format!( + "error getting decoding base64 data: {}", + e + ), + })?; String::from_utf8_lossy(&bytes).into_owned() } }; diff --git a/nexus/src/authn/silos.rs b/nexus/src/authn/silos.rs index 337e82e1dfb..ff1ae711334 100644 --- a/nexus/src/authn/silos.rs +++ b/nexus/src/authn/silos.rs @@ -11,6 +11,8 @@ use crate::db::{model, DataStore}; use omicron_common::api::external::LookupResult; use anyhow::{anyhow, Result}; +use base64::Engine; +use dropshot::HttpError; use samael::metadata::ContactPerson; use samael::metadata::ContactType; use samael::metadata::EntityDescriptor; @@ -19,8 +21,6 @@ use samael::metadata::HTTP_REDIRECT_BINDING; use samael::schema::Response as SAMLResponse; use samael::service_provider::ServiceProvider; use samael::service_provider::ServiceProviderBuilder; - -use dropshot::HttpError; use serde::{Deserialize, Serialize}; #[derive(Deserialize)] @@ -209,7 +209,10 @@ impl SamlIdentityProvider { fn public_cert_bytes(&self) -> Result>> { if let Some(cert) = &self.public_cert { - Ok(Some(base64::decode(cert.as_bytes())?)) + Ok(Some( + base64::engine::general_purpose::STANDARD + .decode(cert.as_bytes())?, + )) } else { Ok(None) } @@ -217,7 +220,10 @@ impl SamlIdentityProvider { fn private_key_bytes(&self) -> Result>> { if let Some(key) = &self.private_key { - Ok(Some(base64::decode(key.as_bytes())?)) + Ok(Some( + base64::engine::general_purpose::STANDARD + .decode(key.as_bytes())?, + )) } else { Ok(None) } @@ -240,15 +246,14 @@ impl SamlIdentityProvider { ) })?; - let 
raw_response_bytes = base64::decode( - saml_post.saml_response.as_bytes(), - ) - .map_err(|e| { - HttpError::for_bad_request( - None, - format!("error base64 decoding SAMLResponse! {}", e), - ) - })?; + let raw_response_bytes = base64::engine::general_purpose::STANDARD + .decode(saml_post.saml_response.as_bytes()) + .map_err(|e| { + HttpError::for_bad_request( + None, + format!("error base64 decoding SAMLResponse! {}", e), + ) + })?; // This base64 decoded string is the SAMLResponse XML. Be aware that // parsing unauthenticated arbitrary XML is garbage and a source of diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index bb9a0897e29..34bfc75680e 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -227,7 +227,8 @@ pub struct RelayState { impl RelayState { pub fn to_encoded(&self) -> Result { - Ok(base64::encode( + Ok(base64::Engine::encode( + &base64::engine::general_purpose::STANDARD, serde_json::to_string(&self).context("encoding relay state")?, )) } @@ -235,8 +236,11 @@ impl RelayState { pub fn from_encoded(encoded: String) -> Result { serde_json::from_str( &String::from_utf8( - base64::decode(encoded) - .context("base64 decoding relay state")?, + base64::Engine::decode( + &base64::engine::general_purpose::STANDARD, + encoded, + ) + .context("base64 decoding relay state")?, ) .context("creating relay state string")?, ) diff --git a/nexus/tests/integration_tests/saml.rs b/nexus/tests/integration_tests/saml.rs index 2e85c615081..a5b396ab2b2 100644 --- a/nexus/tests/integration_tests/saml.rs +++ b/nexus/tests/integration_tests/saml.rs @@ -5,6 +5,8 @@ use std::fmt::Debug; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; +use nexus_test_utils::resource_helpers::{create_silo, object_create}; +use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_nexus::authn::silos::{ IdentityProviderType, SamlIdentityProvider, SamlLoginPost, @@ -14,13 +16,10 @@ use omicron_nexus::external_api::views::{self, Silo}; use omicron_nexus::external_api::{params, shared}; use omicron_nexus::TestInterfaces; +use base64::Engine; +use dropshot::ResultsPage; use http::method::Method; use http::StatusCode; -use nexus_test_utils::resource_helpers::{create_silo, object_create}; - -use nexus_test_utils_macros::nexus_test; - -use dropshot::ResultsPage; use httptest::{matchers::*, responders::*, Expectation, Server}; use uuid::Uuid; @@ -488,12 +487,14 @@ async fn test_saml_idp_reject_keypair(cptestctx: &ControlPlaneTestContext) { // Reject signing keypair if the certificate or key is base64 encoded // but not valid params::DerEncodedKeyPair { - public_cert: base64::encode("not a cert"), + public_cert: base64::engine::general_purpose::STANDARD + .encode("not a cert"), private_key: RSA_KEY_1_PRIVATE.to_string(), }, params::DerEncodedKeyPair { public_cert: RSA_KEY_1_PUBLIC.to_string(), - private_key: base64::encode("not a cert"), + private_key: base64::engine::general_purpose::STANDARD + .encode("not a cert"), }, // Reject signing keypair if cert and key are swapped params::DerEncodedKeyPair { @@ -644,7 +645,8 @@ fn test_correct_saml_response() { }; let body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE), relay_state: None, }) .unwrap(); @@ -690,7 +692,8 @@ fn test_correct_saml_response_ecdsa_sha256() { }; let 
body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE_SIGNED_WITH_ECDSA_SHA256), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE_SIGNED_WITH_ECDSA_SHA256), relay_state: None, }) .unwrap(); @@ -735,7 +738,8 @@ fn test_accept_saml_response_only_assertion_signed() { }; let body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE_ONLY_ASSERTION_SIGNED), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE_ONLY_ASSERTION_SIGNED), relay_state: None, }) .unwrap(); @@ -774,7 +778,8 @@ fn test_reject_unsigned_saml_response() { }; let body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE_UNSIGNED), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE_UNSIGNED), relay_state: None, }) .unwrap(); @@ -816,7 +821,8 @@ fn test_reject_saml_response_with_xml_comment() { }; let body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE_WITH_COMMENT), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE_WITH_COMMENT), relay_state: None, }) .unwrap(); @@ -855,7 +861,8 @@ fn test_correct_saml_response_with_group_attributes() { }; let body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE_WITH_GROUPS), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE_WITH_GROUPS), relay_state: None, }) .unwrap(); @@ -905,7 +912,8 @@ fn test_correct_saml_response_with_group_attributes_wrong_attribute_name() { }; let body_bytes = serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(&SAML_RESPONSE_WITH_GROUPS), + saml_response: base64::engine::general_purpose::STANDARD + .encode(&SAML_RESPONSE_WITH_GROUPS), relay_state: None, }) .unwrap(); @@ -954,7 +962,8 @@ async fn test_post_saml_response(cptestctx: &ControlPlaneTestContext) { }, idp_metadata_source: params::IdpMetadataSource::Base64EncodedXml { - data: base64::encode(SAML_RESPONSE_IDP_DESCRIPTOR), + data: base64::engine::general_purpose::STANDARD + .encode(SAML_RESPONSE_IDP_DESCRIPTOR), }, idp_entity_id: "https://some.idp.test/oxide_rack/".to_string(), @@ -990,7 +999,8 @@ async fn test_post_saml_response(cptestctx: &ControlPlaneTestContext) { ) .raw_body(Some( serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(SAML_RESPONSE_WITH_GROUPS), + saml_response: base64::engine::general_purpose::STANDARD + .encode(SAML_RESPONSE_WITH_GROUPS), relay_state: None, }) .unwrap(), @@ -1094,7 +1104,8 @@ async fn test_post_saml_response_with_relay_state( }, idp_metadata_source: params::IdpMetadataSource::Base64EncodedXml { - data: base64::encode(SAML_RESPONSE_IDP_DESCRIPTOR), + data: base64::engine::general_purpose::STANDARD + .encode(SAML_RESPONSE_IDP_DESCRIPTOR), }, idp_entity_id: "https://some.idp.test/oxide_rack/".to_string(), @@ -1130,7 +1141,8 @@ async fn test_post_saml_response_with_relay_state( ) .raw_body(Some( serde_urlencoded::to_string(SamlLoginPost { - saml_response: base64::encode(SAML_RESPONSE), + saml_response: base64::engine::general_purpose::STANDARD + .encode(SAML_RESPONSE), relay_state: Some( console_api::RelayState { referer: Some("/some/actual/nexus/url".to_string()), diff --git a/nexus/tests/integration_tests/silos.rs b/nexus/tests/integration_tests/silos.rs index 17a815b8b72..614bea5e0d8 100644 --- 
a/nexus/tests/integration_tests/silos.rs +++ b/nexus/tests/integration_tests/silos.rs @@ -2,39 +2,39 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use crate::integration_tests::saml::SAML_IDP_DESCRIPTOR; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; +use nexus_test_utils::resource_helpers::{ + create_local_user, create_organization, create_silo, grant_iam, + object_create, objects_list_page_authz, +}; +use nexus_test_utils_macros::nexus_test; +use omicron_common::api::external::ObjectIdentity; use omicron_common::api::external::{ IdentityMetadataCreateParams, LookupType, Name, }; use omicron_nexus::authn::silos::{AuthenticatedSubject, IdentityProviderType}; +use omicron_nexus::authn::{USER_TEST_PRIVILEGED, USER_TEST_UNPRIVILEGED}; +use omicron_nexus::authz::{self, SiloRole}; use omicron_nexus::context::OpContext; use omicron_nexus::db; +use omicron_nexus::db::fixed_data::silo::{DEFAULT_SILO, SILO_ID}; +use omicron_nexus::db::identity::Asset; use omicron_nexus::db::lookup::LookupPath; use omicron_nexus::external_api::views::{ self, IdentityProvider, Organization, SamlIdentityProvider, Silo, }; use omicron_nexus::external_api::{params, shared}; + use std::collections::{BTreeMap, HashSet}; use std::fmt::Write; use std::str::FromStr; +use base64::Engine; use http::method::Method; use http::StatusCode; -use nexus_test_utils::resource_helpers::{ - create_local_user, create_organization, create_silo, grant_iam, - object_create, objects_list_page_authz, -}; - -use crate::integration_tests::saml::SAML_IDP_DESCRIPTOR; -use nexus_test_utils_macros::nexus_test; -use omicron_nexus::authz::{self, SiloRole}; -use uuid::Uuid; - use httptest::{matchers::*, responders::*, Expectation, Server}; -use omicron_common::api::external::ObjectIdentity; -use omicron_nexus::authn::{USER_TEST_PRIVILEGED, USER_TEST_UNPRIVILEGED}; -use omicron_nexus::db::fixed_data::silo::{DEFAULT_SILO, SILO_ID}; -use omicron_nexus::db::identity::Asset; +use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -538,7 +538,8 @@ async fn test_saml_idp_metadata_data_valid( }, idp_metadata_source: params::IdpMetadataSource::Base64EncodedXml { - data: base64::encode(SAML_IDP_DESCRIPTOR.to_string()), + data: base64::engine::general_purpose::STANDARD + .encode(SAML_IDP_DESCRIPTOR.to_string()), }, idp_entity_id: "entity_id".to_string(), @@ -602,7 +603,7 @@ async fn test_saml_idp_metadata_data_truncated( }, idp_metadata_source: params::IdpMetadataSource::Base64EncodedXml { - data: base64::encode({ + data: base64::engine::general_purpose::STANDARD.encode({ let mut saml_idp_descriptor = SAML_IDP_DESCRIPTOR.to_string(); saml_idp_descriptor.truncate(100); @@ -1856,7 +1857,8 @@ async fn test_local_silo_constraints(cptestctx: &ControlPlaneTestContext) { idp_metadata_source: params::IdpMetadataSource::Base64EncodedXml { - data: base64::encode(SAML_IDP_DESCRIPTOR.to_string()), + data: base64::engine::general_purpose::STANDARD + .encode(SAML_IDP_DESCRIPTOR.to_string()), }, idp_entity_id: "entity_id".to_string(), diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index efcd31f2473..e7722c8ec31 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -5,6 +5,7 @@ //! Params define the request bodies of API endpoints for creating or updating resources. 
use crate::external_api::shared; +use base64::Engine; use chrono::{DateTime, Utc}; use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, IdentityMetadataUpdateParams, @@ -297,12 +298,14 @@ impl<'de> Visitor<'de> for X509CertVisitor { where E: de::Error, { - let raw_bytes = base64::decode(&value.as_bytes()).map_err(|e| { - de::Error::custom(format!( - "could not base64 decode public_cert: {}", - e - )) - })?; + let raw_bytes = base64::engine::general_purpose::STANDARD + .decode(&value.as_bytes()) + .map_err(|e| { + de::Error::custom(format!( + "could not base64 decode public_cert: {}", + e + )) + })?; let _parsed = openssl::x509::X509::from_der(&raw_bytes).map_err(|e| { de::Error::custom(format!( @@ -342,12 +345,14 @@ impl<'de> Visitor<'de> for KeyVisitor { where E: de::Error, { - let raw_bytes = base64::decode(&value).map_err(|e| { - de::Error::custom(format!( - "could not base64 decode private_key: {}", - e - )) - })?; + let raw_bytes = base64::engine::general_purpose::STANDARD + .decode(&value) + .map_err(|e| { + de::Error::custom(format!( + "could not base64 decode private_key: {}", + e + )) + })?; // TODO: samael does not support ECDSA, update to generic PKey type when it does //let _parsed = openssl::pkey::PKey::private_key_from_der(&raw_bytes) @@ -423,7 +428,8 @@ pub struct SamlIdentityProviderCreate { /// sign some junk data and validate it with the key pair fn sign_junk_data(key_pair: &DerEncodedKeyPair) -> Result<(), anyhow::Error> { let private_key = { - let raw_bytes = base64::decode(&key_pair.private_key)?; + let raw_bytes = base64::engine::general_purpose::STANDARD + .decode(&key_pair.private_key)?; // TODO: samael does not support ECDSA, update to generic PKey type when it does //let parsed = openssl::pkey::PKey::private_key_from_der(&raw_bytes)?; let parsed = openssl::rsa::Rsa::private_key_from_der(&raw_bytes)?; @@ -432,7 +438,8 @@ fn sign_junk_data(key_pair: &DerEncodedKeyPair) -> Result<(), anyhow::Error> { }; let public_key = { - let raw_bytes = base64::decode(&key_pair.public_cert)?; + let raw_bytes = base64::engine::general_purpose::STANDARD + .decode(&key_pair.public_cert)?; let parsed = openssl::x509::X509::from_der(&raw_bytes)?; parsed.public_key()? }; @@ -708,14 +715,18 @@ impl UserData { where S: Serializer, { - base64::encode(data).serialize(serializer) + base64::engine::general_purpose::STANDARD + .encode(data) + .serialize(serializer) } pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, { - match base64::decode(::deserialize(deserializer)?) { + match base64::engine::general_purpose::STANDARD + .decode(::deserialize(deserializer)?) 
+ { Ok(buf) => { // if you change this, also update the stress test in crate::cidata if buf.len() > MAX_USER_DATA_BYTES { diff --git a/package-manifest.toml b/package-manifest.toml index bb030255a87..54ae5e6d31f 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -105,10 +105,10 @@ output.type = "zone" service_name = "propolis-server" source.type = "prebuilt" source.repo = "propolis" -source.commit = "666ded451b13bba0895485c0b34515c0e59c2c6e" +source.commit = "92508d573529a1ee50a9422fbca045a5e980a2b5" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "29b65817eaeb6e9e8f5ce9574f95385ea8f976f352091672a9cde2b2a5103dd3" +source.sha256 = "6fcbff8968addc95e6f7f099f3fc19fe68a4f1192a2aa9f90df2b75f2f94f7da" output.type = "zone" [package.maghemite] From b3d38498ced09b675b577582c8388466f7093c54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 14:05:10 -0800 Subject: [PATCH 74/80] Bump indicatif from 0.17.2 to 0.17.3 (#2171) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.2 to 0.17.3.
Release notes

Sourced from indicatif's releases.

0.17.3

A small maintenance release:

Full Changelog: https://github.com/console-rs/indicatif/compare/0.17.2...0.17.3

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=indicatif&package-manager=cargo&previous-version=0.17.2&new-version=0.17.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 78a4925ac1f..faab9f432f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2574,9 +2574,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4295cbb7573c16d310e99e713cf9e75101eb190ab31fccd35f2d2691b4352b19" +checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" dependencies = [ "console", "number_prefix", diff --git a/Cargo.toml b/Cargo.toml index c7318985225..7785655a7c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -131,7 +131,7 @@ hex = "0.4.3" http = "0.2.7" httptest = "0.15.4" hyper = "0.14" -indicatif = { version = "0.17.2", features = ["rayon"] } +indicatif = { version = "0.17.3", features = ["rayon"] } installinator-artifactd = { path = "installinator-artifactd" } installinator-artifact-client = { path = "installinator-artifact-client" } internal-dns = { path = "internal-dns" } From 653479b2d2b385a553a4326b6cede46dc8c094db Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 17 Jan 2023 19:46:13 -0500 Subject: [PATCH 75/80] Docs on ALLOW_FULL_TABLE_SCAN_SQL --- nexus/src/db/pool.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/nexus/src/db/pool.rs b/nexus/src/db/pool.rs index 3b5c243bf6a..8132ecee29f 100644 --- a/nexus/src/db/pool.rs +++ b/nexus/src/db/pool.rs @@ -72,6 +72,19 @@ impl Pool { const DISALLOW_FULL_TABLE_SCAN_SQL: &str = "set disallow_full_table_scans = on; set large_full_scan_rows = 0;"; + +/// SQL used to enable full table scans for the duration of the current +/// transaction. +/// +/// We normally disallow table scans in effort to identify scalability issues +/// during development. But it's preferable for some ad hoc test-only queries to +/// do table scans (rather than add indexes that are only used for the test +/// suite). +/// +/// This SQL appears to have no effect when used outside of a transaction. +/// That's intentional. We do not want to use `SET` (rather than `SET LOCAL`) +/// here because that would change the behavior for any code that happens to use +/// the same pooled connection after this SQL gets run. 
#[cfg(test)] pub const ALLOW_FULL_TABLE_SCAN_SQL: &str = "set local disallow_full_table_scans = off; set local large_full_scan_rows = 1000;"; From 3db02c7fecbb3763fe261b9a1affefd8d0845e65 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Tue, 17 Jan 2023 19:48:17 -0500 Subject: [PATCH 76/80] fix mis-merge --- nexus/src/app/disk.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 5edd9d19aa2..20be5c50755 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -539,7 +539,7 @@ impl super::Nexus { .fetch() .await?; - self.volume_remove_read_only_parent(db_disk.volume_id).await?; + self.volume_remove_read_only_parent(&opctx, db_disk.volume_id).await?; Ok(()) } From f96cb955deb8f5c3ac76ea95a75fd4ce7a61d8ca Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 18 Jan 2023 01:23:10 -0500 Subject: [PATCH 77/80] Collect timestamps for Oximeter in CRDB to act as tie-breaker --- common/src/sql/dbinit.sql | 4 +++ nexus/db-model/src/schema.rs | 10 ++++++ .../src/virtual_provisioning_collection.rs | 4 +++ .../src/virtual_provisioning_resource.rs | 4 +++ nexus/src/app/provisioning.rs | 15 ++++++-- nexus/src/db/datastore/silo.rs | 9 ----- .../virtual_provisioning_collection_update.rs | 20 +++++++---- oximeter/db/src/model.rs | 6 ++-- oximeter/oximeter-macro-impl/src/lib.rs | 7 ++-- oximeter/oximeter/src/traits.rs | 7 ++-- oximeter/oximeter/src/types.rs | 35 ++++++++++++------- 11 files changed, 83 insertions(+), 38 deletions(-) diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 3bf674cfb4c..3c396799f69 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -137,6 +137,8 @@ CREATE INDEX ON omicron.public.service ( CREATE TABLE omicron.public.virtual_provisioning_collection ( -- Should match the UUID of the corresponding collection. id UUID PRIMARY KEY, + time_modified TIMESTAMPTZ NOT NULL DEFAULT NOW(), + -- Identifies the type of the collection. collection_type STRING(63) NOT NULL, @@ -168,6 +170,8 @@ CREATE TABLE omicron.public.virtual_provisioning_collection ( CREATE TABLE omicron.public.virtual_provisioning_resource ( -- Should match the UUID of the corresponding collection. id UUID PRIMARY KEY, + time_modified TIMESTAMPTZ NOT NULL DEFAULT NOW(), + -- Identifies the type of the resource. resource_type STRING(63) NOT NULL, diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 6d121952981..038f26b2e2f 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -407,6 +407,11 @@ table! { table! { virtual_provisioning_collection { id -> Uuid, + // This type isn't actually "Nullable" - it's just handy to use the + // same type for insertion and querying, and doing so requires this + // field to appear optional so we can let this (default) field appear + // optional. + time_modified -> Nullable, collection_type -> Text, virtual_disk_bytes_provisioned -> Int8, cpus_provisioned -> Int8, @@ -417,6 +422,11 @@ table! { table! { virtual_provisioning_resource { id -> Uuid, + // This type isn't actually "Nullable" - it's just handy to use the + // same type for insertion and querying, and doing so requires this + // field to appear optional so we can let this (default) field appear + // optional. 
+ time_modified -> Nullable, resource_type -> Text, virtual_disk_bytes_provisioned -> Int8, cpus_provisioned -> Int8, diff --git a/nexus/db-model/src/virtual_provisioning_collection.rs b/nexus/db-model/src/virtual_provisioning_collection.rs index 88fa5e52d9c..0a4d9e27958 100644 --- a/nexus/db-model/src/virtual_provisioning_collection.rs +++ b/nexus/db-model/src/virtual_provisioning_collection.rs @@ -4,6 +4,7 @@ use crate::schema::virtual_provisioning_collection; use crate::ByteCount; +use chrono::{DateTime, Utc}; use omicron_common::api::external; use parse_display::Display; use uuid::Uuid; @@ -19,8 +20,10 @@ pub enum CollectionTypeProvisioned { /// Describes virtual_provisioning_collection for a collection #[derive(Clone, Selectable, Queryable, Insertable, Debug)] #[diesel(table_name = virtual_provisioning_collection)] +#[diesel(treat_none_as_default_value = true)] pub struct VirtualProvisioningCollection { pub id: Uuid, + pub time_modified: Option>, pub collection_type: String, pub virtual_disk_bytes_provisioned: ByteCount, @@ -32,6 +35,7 @@ impl VirtualProvisioningCollection { pub fn new(id: Uuid, collection_type: CollectionTypeProvisioned) -> Self { Self { id, + time_modified: None, collection_type: collection_type.to_string(), virtual_disk_bytes_provisioned: ByteCount( external::ByteCount::from(0), diff --git a/nexus/db-model/src/virtual_provisioning_resource.rs b/nexus/db-model/src/virtual_provisioning_resource.rs index cb388710907..237e37b17b4 100644 --- a/nexus/db-model/src/virtual_provisioning_resource.rs +++ b/nexus/db-model/src/virtual_provisioning_resource.rs @@ -4,6 +4,7 @@ use crate::schema::virtual_provisioning_resource; use crate::ByteCount; +use chrono::{DateTime, Utc}; use omicron_common::api::external; use uuid::Uuid; @@ -27,8 +28,10 @@ impl std::fmt::Display for ResourceTypeProvisioned { /// Describes virtual_provisioning_resource for a resource. 
#[derive(Clone, Selectable, Queryable, Insertable, Debug)] #[diesel(table_name = virtual_provisioning_resource)] +#[diesel(treat_none_as_default_value = true)] pub struct VirtualProvisioningResource { pub id: Uuid, + pub time_modified: Option>, pub resource_type: String, pub virtual_disk_bytes_provisioned: ByteCount, @@ -40,6 +43,7 @@ impl VirtualProvisioningResource { pub fn new(id: Uuid, resource_type: ResourceTypeProvisioned) -> Self { Self { id, + time_modified: None, resource_type: resource_type.to_string(), virtual_disk_bytes_provisioned: ByteCount( external::ByteCount::from(0), diff --git a/nexus/src/app/provisioning.rs b/nexus/src/app/provisioning.rs index dd6713dc409..1501ceed818 100644 --- a/nexus/src/app/provisioning.rs +++ b/nexus/src/app/provisioning.rs @@ -66,7 +66,10 @@ impl Producer { let new_samples = provisions .iter() .map(|provision| { - Sample::new( + Sample::new_with_timestamp( + provision + .time_modified + .expect("Should always have default value"), &CollectionTarget { id: provision.id }, &VirtualDiskSpaceProvisioned { bytes_used: provision @@ -87,13 +90,19 @@ impl Producer { let new_samples = provisions .iter() .map(|provision| { - Sample::new( + Sample::new_with_timestamp( + provision + .time_modified + .expect("Should always have default value"), &CollectionTarget { id: provision.id }, &CpusProvisioned { cpus: provision.cpus_provisioned }, ) }) .chain(provisions.iter().map(|provision| { - Sample::new( + Sample::new_with_timestamp( + provision + .time_modified + .expect("Should always have default value"), &CollectionTarget { id: provision.id }, &RamProvisioned { bytes: provision.ram_provisioned.into() }, ) diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs index 9a80454231c..165ac5ef316 100644 --- a/nexus/src/db/datastore/silo.rs +++ b/nexus/src/db/datastore/silo.rs @@ -147,15 +147,6 @@ impl DataStore { .await? .transaction_async(|conn| async move { let silo = silo_create_query.get_result_async(&conn).await?; - use db::schema::virtual_provisioning_collection::dsl; - diesel::insert_into(dsl::virtual_provisioning_collection) - .values(VirtualProvisioningCollection::new( - silo.id(), - CollectionTypeProvisioned::Silo, - )) - .execute_async(&conn) - .await?; - self.virtual_provisioning_collection_create_on_connection( &conn, VirtualProvisioningCollection::new( diff --git a/nexus/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/src/db/queries/virtual_provisioning_collection_update.rs index 13dd0352906..cd9c5fd4d63 100644 --- a/nexus/src/db/queries/virtual_provisioning_collection_update.rs +++ b/nexus/src/db/queries/virtual_provisioning_collection_update.rs @@ -322,9 +322,12 @@ impl VirtualProvisioningCollectionUpdate { // Within this project, org, silo, fleet... project_id, // ... We add the disk usage. - collection_dsl::virtual_disk_bytes_provisioned - .eq(collection_dsl::virtual_disk_bytes_provisioned - + disk_byte_diff), + ( + collection_dsl::time_modified.eq(diesel::dsl::now), + collection_dsl::virtual_disk_bytes_provisioned + .eq(collection_dsl::virtual_disk_bytes_provisioned + + disk_byte_diff), + ), ) } @@ -348,9 +351,12 @@ impl VirtualProvisioningCollectionUpdate { // Within this project, org, silo, fleet... project_id, // ... We subtract the disk usage. 
-            collection_dsl::virtual_disk_bytes_provisioned
-                .eq(collection_dsl::virtual_disk_bytes_provisioned
-                    - disk_byte_diff),
+            (
+                collection_dsl::time_modified.eq(diesel::dsl::now),
+                collection_dsl::virtual_disk_bytes_provisioned
+                    .eq(collection_dsl::virtual_disk_bytes_provisioned
+                        - disk_byte_diff),
+            ),
         )
     }

@@ -386,6 +392,7 @@ impl VirtualProvisioningCollectionUpdate {
             project_id,
             // ... We update the resource usage.
             (
+                collection_dsl::time_modified.eq(diesel::dsl::now),
                 collection_dsl::cpus_provisioned
                     .eq(collection_dsl::cpus_provisioned + cpus_diff),
                 collection_dsl::ram_provisioned
@@ -416,6 +423,7 @@ impl VirtualProvisioningCollectionUpdate {
             project_id,
             // ... We update the resource usage.
             (
+                collection_dsl::time_modified.eq(diesel::dsl::now),
                 collection_dsl::cpus_provisioned
                     .eq(collection_dsl::cpus_provisioned - cpus_diff),
                 collection_dsl::ram_provisioned
diff --git a/oximeter/db/src/model.rs b/oximeter/db/src/model.rs
index 78e769bc476..dde3ed4f007 100644
--- a/oximeter/db/src/model.rs
+++ b/oximeter/db/src/model.rs
@@ -661,7 +661,7 @@ where
 {
     fn from(sample: DbTimeseriesScalarGaugeSample<T>) -> Measurement {
         let datum = Datum::from(sample.datum);
-        Measurement::with_timestamp(sample.timestamp, datum)
+        Measurement::new(sample.timestamp, datum)
     }
 }

@@ -674,7 +674,7 @@ where
         let cumulative =
             Cumulative::with_start_time(sample.start_time, sample.datum);
         let datum = Datum::from(cumulative);
-        Measurement::with_timestamp(sample.timestamp, datum)
+        Measurement::new(sample.timestamp, datum)
     }
 }

@@ -692,7 +692,7 @@ where
             )
             .unwrap(),
         );
-        Measurement::with_timestamp(sample.timestamp, datum)
+        Measurement::new(sample.timestamp, datum)
     }
 }

diff --git a/oximeter/oximeter-macro-impl/src/lib.rs b/oximeter/oximeter-macro-impl/src/lib.rs
index 604e8b234a7..f7bb407ee32 100644
--- a/oximeter/oximeter-macro-impl/src/lib.rs
+++ b/oximeter/oximeter-macro-impl/src/lib.rs
@@ -207,8 +207,11 @@ fn build_metric_trait_impl(
                 &mut self.#datum_field_ident
             }

-            fn measure(&self) -> ::oximeter::Measurement {
-                ::oximeter::Measurement::new(::oximeter::Datum::from(&self.#datum_field_ident))
+            fn measure(&self, timestamp: ::chrono::DateTime<::chrono::Utc>) -> ::oximeter::Measurement {
+                ::oximeter::Measurement::new(
+                    timestamp,
+                    ::oximeter::Datum::from(&self.#datum_field_ident)
+                )
             }

             fn start_time(&self) -> Option<::chrono::DateTime<::chrono::Utc>> {
diff --git a/oximeter/oximeter/src/traits.rs b/oximeter/oximeter/src/traits.rs
index b9b70f240df..cb6a7e8b935 100644
--- a/oximeter/oximeter/src/traits.rs
+++ b/oximeter/oximeter/src/traits.rs
@@ -107,6 +107,7 @@ pub trait Target {
 /// Example
 /// -------
 /// ```rust
+/// use chrono::Utc;
 /// use oximeter::Metric;
 ///
 /// // A gauge with a floating-point value.
@@ -118,7 +119,7 @@ pub trait Target {
 ///
 /// let met = MyMetric { name: "name".into(), datum: 0.0 };
 /// assert_eq!(met.datum_type(), oximeter::DatumType::F64);
-/// let measurement = met.measure();
+/// let measurement = met.measure(Utc::now());
 /// assert!(measurement.start_time().is_none());
 /// assert_eq!(measurement.datum(), &oximeter::Datum::F64(0.0));
 /// ```
@@ -166,8 +167,8 @@ pub trait Metric {
     /// Return a mutable reference to the underlying metric itself.
     fn datum_mut(&mut self) -> &mut Self::Datum;

-    /// Sample the underlying metric, returning a measurement from it.
-    fn measure(&self) -> Measurement;
+    /// Sample the underlying metric, with a caller-supplied timestamp.
+    fn measure(&self, timestamp: DateTime<Utc>) -> Measurement;

     /// Return true if the metric is cumulative, else false.
     fn is_cumulative(&self) -> bool {
diff --git a/oximeter/oximeter/src/types.rs b/oximeter/oximeter/src/types.rs
index 0a25c2ddf75..a7d1c273da8 100644
--- a/oximeter/oximeter/src/types.rs
+++ b/oximeter/oximeter/src/types.rs
@@ -355,13 +355,8 @@ impl PartialEq<&Measurement> for Measurement {
 impl Measurement {
     /// Construct a `Measurement` with the given timestamp.
-    pub fn with_timestamp(timestamp: DateTime<Utc>, datum: Datum) -> Self {
-        Self { timestamp, datum }
-    }
-
-    /// Generate a new measurement from a `Datum`, using the current time as the timestamp
-    pub fn new<D: Into<Datum>>(datum: D) -> Measurement {
-        Measurement { timestamp: Utc::now(), datum: datum.into() }
+    pub fn new<D: Into<Datum>>(timestamp: DateTime<Utc>, datum: D) -> Self {
+        Self { timestamp, datum: datum.into() }
     }

     /// Return the datum for this measurement
@@ -531,11 +526,15 @@ impl PartialEq for Sample {
 }

 impl Sample {
-    /// Construct a new sample.
+    /// Construct a new sample, recorded at the time of the supplied timestamp.
     ///
     /// This materializes the data from the target and metric, and stores that information along
     /// with the measurement data itself.
-    pub fn new<T, M>(target: &T, metric: &M) -> Self
+    pub fn new_with_timestamp<T, M>(
+        timestamp: DateTime<Utc>,
+        target: &T,
+        metric: &M,
+    ) -> Self
     where
         T: traits::Target,
         M: traits::Metric,
@@ -544,10 +543,22 @@ impl Sample {
             timeseries_name: format!("{}:{}", target.name(), metric.name()),
             target: FieldSet::from_target(target),
             metric: FieldSet::from_metric(metric),
-            measurement: metric.measure(),
+            measurement: metric.measure(timestamp),
         }
     }

+    /// Construct a new sample, created at the time the function is called.
+    ///
+    /// This materializes the data from the target and metric, and stores that information along
+    /// with the measurement data itself.
+    pub fn new<T, M>(target: &T, metric: &M) -> Self
+    where
+        T: traits::Target,
+        M: traits::Metric,
+    {
+        Self::new_with_timestamp(Utc::now(), target, metric)
+    }
+
     /// Return the fields for this sample.
     ///
     /// This returns the target fields and metric fields, chained, although there is no distinction
@@ -709,12 +720,12 @@ mod tests {
     #[test]
     fn test_measurement() {
-        let measurement = Measurement::new(0i64);
+        let measurement = Measurement::new(chrono::Utc::now(), 0i64);
         assert_eq!(measurement.datum_type(), DatumType::I64);
         assert_eq!(measurement.start_time(), None);

         let datum = Cumulative::new(0i64);
-        let measurement = Measurement::new(datum.clone());
+        let measurement = Measurement::new(chrono::Utc::now(), datum.clone());
         assert_eq!(measurement.datum(), &Datum::from(datum));
         assert!(measurement.start_time().is_some());
         assert!(measurement.timestamp() >= measurement.start_time().unwrap());

From 57c73898713255162f12b0921b25f4fc5037b3a3 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 18 Jan 2023 01:31:14 -0500
Subject: [PATCH 78/80] Fix bad merge

---
 nexus/src/internal_api/http_entrypoints.rs | 25 ----------------------
 1 file changed, 25 deletions(-)

diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs
index c2bae469751..8f22df4ca43 100644
--- a/nexus/src/internal_api/http_entrypoints.rs
+++ b/nexus/src/internal_api/http_entrypoints.rs
@@ -253,31 +253,6 @@ async fn cpapi_disk_remove_read_only_parent(
     apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await
 }

-/// Request removal of a read_only_parent from a disk
-/// This is a thin wrapper around the volume_remove_read_only_parent saga.
-/// All we are doing here is, given a disk UUID, figure out what the
-/// volume_id is for that disk, then use that to call the
-/// volume_remove_read_only_parent saga on it.
-#[endpoint {
-    method = POST,
-    path = "/disk/{disk_id}/remove-read-only-parent",
-    }]
-async fn cpapi_disk_remove_read_only_parent(
-    rqctx: Arc<RequestContext<Arc<ServerContext>>>,
-    path_params: Path<DiskPathParam>,
-) -> Result<HttpResponseUpdatedNoContent, HttpError> {
-    let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
-    let path = path_params.into_inner();
-
-    let handler = async {
-        let opctx = OpContext::for_internal_api(&rqctx).await;
-        nexus.disk_remove_read_only_parent(&opctx, path.disk_id).await?;
-        Ok(HttpResponseUpdatedNoContent())
-    };
-    apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await
-}
-
 /// Accept a registration from a new metric producer
 #[endpoint {
     method = POST,

From e18210e9aa67950bb7d05be542f33b4d661244ae Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 18 Jan 2023 11:11:16 -0500
Subject: [PATCH 79/80] mitigate races in metrics test

---
 nexus/tests/integration_tests/instances.rs | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs
index c847ef24d75..d1b3ec0ba51 100644
--- a/nexus/tests/integration_tests/instances.rs
+++ b/nexus/tests/integration_tests/instances.rs
@@ -4,7 +4,9 @@

 //! Tests basic instance support in the API

-use super::metrics::query_for_latest_metric;
+use super::metrics::{
+    query_for_latest_metric, query_for_metrics_until_they_exist,
+};

 use chrono::Utc;
 use http::method::Method;
@@ -562,6 +564,12 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) {
     };
     oximeter.force_collect().await;
     for id in vec![organization_id, project_id] {
+        query_for_metrics_until_they_exist(
+            client,
+            &metric_url("virtual_disk_space_provisioned", id),
+        )
+        .await;
+
         assert_eq!(
             query_for_latest_metric(
                 client,

From 9f498f08dc15100b29a5aad54fd817e86c66fe50 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Wed, 18 Jan 2023 14:04:29 -0500
Subject: [PATCH 80/80] metrics test needs a wider time window, especially
 when under load

---
 nexus/tests/integration_tests/instances.rs | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs
index d1b3ec0ba51..08ceea19113 100644
--- a/nexus/tests/integration_tests/instances.rs
+++ b/nexus/tests/integration_tests/instances.rs
@@ -4,9 +4,7 @@

 //! Tests basic instance support in the API

-use super::metrics::{
-    query_for_latest_metric, query_for_metrics_until_they_exist,
-};
+use super::metrics::query_for_latest_metric;

 use chrono::Utc;
 use http::method::Method;
@@ -558,18 +556,12 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) {
     let metric_url = |metric_type: &str, id: Uuid| {
         format!(
             "/system/metrics/{metric_type}?start_time={:?}&end_time={:?}&id={id}",
-            Utc::now() - chrono::Duration::seconds(10),
-            Utc::now() + chrono::Duration::seconds(10),
+            Utc::now() - chrono::Duration::seconds(30),
+            Utc::now() + chrono::Duration::seconds(30),
         )
     };
     oximeter.force_collect().await;
     for id in vec![organization_id, project_id] {
-        query_for_metrics_until_they_exist(
-            client,
-            &metric_url("virtual_disk_space_provisioned", id),
-        )
-        .await;
-
         assert_eq!(
             query_for_latest_metric(
                 client,
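
A note for reviewers on the oximeter API change this series lands: `Measurement::new` now takes the timestamp as an explicit first argument, and `Sample::new` keeps its old shape by delegating to `Sample::new_with_timestamp(Utc::now(), ..)`. Below is a minimal sketch of the new call shapes, lifted from the updated `test_measurement` unit test above; only the constructor and accessor calls are taken from the diff, while the `use` paths and the `main` wrapper are my assumptions.

```rust
use chrono::Utc;
use oximeter::types::{Cumulative, Datum, DatumType, Measurement};

fn main() {
    // Gauge-style datum: the caller now supplies the timestamp explicitly,
    // rather than the constructor sampling Utc::now() internally.
    let m = Measurement::new(Utc::now(), 0i64);
    assert_eq!(m.datum_type(), DatumType::I64);
    assert!(m.start_time().is_none());

    // Cumulative datum: the start time rides along with the
    // caller-supplied timestamp.
    let datum = Cumulative::new(0i64);
    let m = Measurement::new(Utc::now(), datum.clone());
    assert_eq!(m.datum(), &Datum::from(datum));
    assert!(m.timestamp() >= m.start_time().unwrap());
}
```

The design motivation, per the nexus changes earlier in the series, is that provisioning samples are now stamped with the `time_modified` column recorded by the database rather than with the collection time, which is also why the metrics test needs a wider query window in patch 80.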