diff --git a/nexus/src/db/datastore.rs b/nexus/src/db/datastore.rs
deleted file mode 100644
index fc4d4ef3d54..00000000000
--- a/nexus/src/db/datastore.rs
+++ /dev/null
@@ -1,5315 +0,0 @@
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at https://mozilla.org/MPL/2.0/.
-
-//! Primary control plane interface for database read and write operations
-
-// TODO-scalability review all queries for use of indexes (may need
-// "time_deleted IS NOT NULL" conditions) Figure out how to automate this.
-//
-// TODO-design Better support for joins?
-// The interfaces here often require that to do anything with an object, a
-// caller must first look up the id and then do operations with the id. For
-// example, the caller of project_list_disks() always looks up the project to
-// get the project_id, then lists disks having that project_id. It's possible
-// to implement this instead with a JOIN in the database so that we do it with
-// one database round-trip. We could use CTEs similar to what we do with
-// conditional updates to distinguish the case where the project didn't exist
-// vs. there were no disks in it. This seems likely to be a fair bit more
-// complicated to do safely and generally compared to what we have now.
-
-use super::collection_insert::{
-    AsyncInsertError, DatastoreCollection, SyncInsertError,
-};
-use super::error::diesel_pool_result_optional;
-use super::identity::{Asset, Resource};
-use super::pool::DbConnection;
-use super::Pool;
-use crate::authn;
-use crate::authz::{self, ApiResource};
-use crate::context::OpContext;
-use crate::db::collection_attach::{AttachError, DatastoreAttachTarget};
-use crate::db::collection_detach::{DatastoreDetachTarget, DetachError};
-use crate::db::collection_detach_many::{
-    DatastoreDetachManyTarget, DetachManyError,
-};
-use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS;
-use crate::db::fixed_data::role_builtin::BUILTIN_ROLES;
-use crate::db::fixed_data::silo::DEFAULT_SILO;
-use crate::db::lookup::LookupPath;
-use crate::db::model::DatabaseString;
-use crate::db::model::IncompleteInstanceExternalIp;
-use crate::db::model::IncompleteVpc;
-use crate::db::model::InstanceExternalIp;
-use crate::db::model::IpPool;
-use crate::db::model::IpPoolRange;
-use crate::db::model::IpPoolUpdate;
-use crate::db::model::NetworkInterfaceUpdate;
-use crate::db::model::Vpc;
-use crate::db::queries::external_ip::NextExternalIp;
-use crate::db::queries::ip_pool::FilterOverlappingIpRanges;
-use crate::db::queries::network_interface;
-use crate::db::queries::vpc::InsertVpcQuery;
-use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery;
-use crate::db::queries::vpc_subnet::SubnetError;
-use crate::db::{
-    self,
-    error::{
-        public_error_from_diesel_create, public_error_from_diesel_lookup,
-        public_error_from_diesel_pool, ErrorHandler, TransactionError,
-    },
-    model::{
-        ConsoleSession, Dataset, DatasetKind, DeviceAccessToken,
-        DeviceAuthRequest, Disk, DiskRuntimeState, Generation, GlobalImage,
-        IdentityProvider, IncompleteNetworkInterface, Instance,
-        InstanceRuntimeState, Name, NetworkInterface, Organization,
-        OrganizationUpdate, OximeterInfo, ProducerEndpoint, Project,
-        ProjectUpdate, Rack, Region, RoleAssignment, RoleBuiltin, RouterRoute,
-        RouterRouteUpdate, Service, Silo, SiloUser, Sled, SshKey,
-        UpdateAvailableArtifact, UserBuiltin, Volume, VpcFirewallRule,
-        VpcRouter, VpcRouterUpdate, VpcSubnet,
-        VpcSubnetUpdate, VpcUpdate,
-        Zpool,
-    },
-    pagination::paginated,
-    pagination::paginated_multicolumn,
-    update_and_check::{UpdateAndCheck, UpdateStatus},
-};
-use crate::external_api::shared::IpRange;
-use crate::external_api::{params, shared};
-use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, ConnectionManager};
-use chrono::Utc;
-use db::model::IdentityType;
-use diesel::pg::Pg;
-use diesel::prelude::*;
-use diesel::query_builder::{QueryFragment, QueryId};
-use diesel::query_dsl::methods::LoadQuery;
-use diesel::upsert::excluded;
-use diesel::{ExpressionMethods, QueryDsl, SelectableHelper};
-use ipnetwork::IpNetwork;
-use omicron_common::api;
-use omicron_common::api::external::DataPageParams;
-use omicron_common::api::external::DeleteResult;
-use omicron_common::api::external::Error;
-use omicron_common::api::external::ListResultVec;
-use omicron_common::api::external::LookupResult;
-use omicron_common::api::external::LookupType;
-use omicron_common::api::external::ResourceType;
-use omicron_common::api::external::UpdateResult;
-use omicron_common::api::external::{self, InternalContext};
-use omicron_common::api::external::{
-    CreateResult, IdentityMetadataCreateParams,
-};
-use omicron_common::bail_unless;
-use sled_agent_client::types as sled_client_types;
-use std::convert::{TryFrom, TryInto};
-use std::net::Ipv6Addr;
-use std::sync::Arc;
-use uuid::Uuid;
-
-// Number of unique datasets required to back a region.
-// TODO: This should likely turn into a configuration option.
-const REGION_REDUNDANCY_THRESHOLD: usize = 3;
-
-// Represents a query that is ready to be executed.
-//
-// This helper trait lets the statement either be executed or explained.
-//
-// U: The output type of executing the statement.
-trait RunnableQuery<U>:
-    RunQueryDsl<DbConnection>
-    + QueryFragment<Pg>
-    + LoadQuery<'static, DbConnection, U>
-    + QueryId
-{
-}
-
-impl<U, T> RunnableQuery<U> for T where
-    T: RunQueryDsl<DbConnection>
-        + QueryFragment<Pg>
-        + LoadQuery<'static, DbConnection, U>
-        + QueryId
-{
-}
-
-pub struct DataStore {
-    pool: Arc<Pool>,
-}
-
-impl DataStore {
-    pub fn new(pool: Arc<Pool>) -> Self {
-        DataStore { pool }
-    }
-
-    // TODO-security This should be deprecated in favor of pool_authorized(),
-    // which gives us the chance to do a minimal security check before hitting
-    // the database. Eventually, this function should only be used for doing
-    // authentication in the first place (since we can't do an authz check in
-    // that case).
-    fn pool(&self) -> &bb8::Pool<ConnectionManager<DbConnection>> {
-        self.pool.pool()
-    }
-
-    pub(super) async fn pool_authorized(
-        &self,
-        opctx: &OpContext,
-    ) -> Result<&bb8::Pool<ConnectionManager<DbConnection>>, Error> {
-        opctx.authorize(authz::Action::Query, &authz::DATABASE).await?;
-        Ok(self.pool.pool())
-    }
-
-    pub async fn rack_list(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResultVec<Rack> {
-        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
-        use db::schema::rack::dsl;
-        paginated(dsl::rack, dsl::id, pagparams)
-            .select(Rack::as_select())
-            .load_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Stores a new rack in the database.
-    ///
-    /// This function is a no-op if the rack already exists.
-    pub async fn rack_insert(
-        &self,
-        opctx: &OpContext,
-        rack: &Rack,
-    ) -> Result<Rack, Error> {
-        use db::schema::rack::dsl;
-
-        diesel::insert_into(dsl::rack)
-            .values(rack.clone())
-            .on_conflict(dsl::id)
-            .do_update()
-            // This is a no-op, since we conflicted on the ID.
-            .set(dsl::id.eq(excluded(dsl::id)))
-            .returning(Rack::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Rack,
-                        &rack.id().to_string(),
-                    ),
-                )
-            })
-    }
-
-    /// Update a rack to mark that it has been initialized
-    pub async fn rack_set_initialized(
-        &self,
-        opctx: &OpContext,
-        rack_id: Uuid,
-        services: Vec<Service>,
-    ) -> UpdateResult<Rack> {
-        use db::schema::rack::dsl as rack_dsl;
-        use db::schema::service::dsl as service_dsl;
-
-        #[derive(Debug)]
-        enum RackInitError {
-            ServiceInsert { err: SyncInsertError, sled_id: Uuid, svc_id: Uuid },
-            RackUpdate(diesel::result::Error),
-        }
-        type TxnError = TransactionError<RackInitError>;
-
-        // NOTE: This operation could likely be optimized with a CTE, but given
-        // the low frequency of calls, this optimization has been deferred.
-        self.pool_authorized(opctx)
-            .await?
-            .transaction(move |conn| {
-                // Early exit if the rack has already been initialized.
-                let rack = rack_dsl::rack
-                    .filter(rack_dsl::id.eq(rack_id))
-                    .select(Rack::as_select())
-                    .get_result(conn)
-                    .map_err(|e| {
-                        TxnError::CustomError(RackInitError::RackUpdate(e))
-                    })?;
-                if rack.initialized {
-                    return Ok(rack);
-                }
-
-                // Otherwise, insert services and set rack.initialized = true.
-                for svc in services {
-                    let sled_id = svc.sled_id;
-                    <Sled as DatastoreCollection<Service>>::insert_resource(
-                        sled_id,
-                        diesel::insert_into(service_dsl::service)
-                            .values(svc.clone())
-                            .on_conflict(service_dsl::id)
-                            .do_update()
-                            .set((
-                                service_dsl::time_modified.eq(Utc::now()),
-                                service_dsl::sled_id
-                                    .eq(excluded(service_dsl::sled_id)),
-                                service_dsl::ip.eq(excluded(service_dsl::ip)),
-                                service_dsl::kind
-                                    .eq(excluded(service_dsl::kind)),
-                            )),
-                    )
-                    .insert_and_get_result(conn)
-                    .map_err(|err| {
-                        TxnError::CustomError(RackInitError::ServiceInsert {
-                            err,
-                            sled_id,
-                            svc_id: svc.id(),
-                        })
-                    })?;
-                }
-                diesel::update(rack_dsl::rack)
-                    .filter(rack_dsl::id.eq(rack_id))
-                    .set((
-                        rack_dsl::initialized.eq(true),
-                        rack_dsl::time_modified.eq(Utc::now()),
-                    ))
-                    .returning(Rack::as_returning())
-                    .get_result::<Rack>(conn)
-                    .map_err(|e| {
-                        TxnError::CustomError(RackInitError::RackUpdate(e))
-                    })
-            })
-            .await
-            .map_err(|e| match e {
-                TxnError::CustomError(RackInitError::ServiceInsert {
-                    err,
-                    sled_id,
-                    svc_id,
-                }) => match err {
-                    SyncInsertError::CollectionNotFound => {
-                        Error::ObjectNotFound {
-                            type_name: ResourceType::Sled,
-                            lookup_type: LookupType::ById(sled_id),
-                        }
-                    }
-                    SyncInsertError::DatabaseError(e) => {
-                        public_error_from_diesel_create(
-                            e,
-                            ResourceType::Service,
-                            &svc_id.to_string(),
-                        )
-                    }
-                },
-                TxnError::CustomError(RackInitError::RackUpdate(err)) => {
-                    public_error_from_diesel_lookup(
-                        err,
-                        ResourceType::Rack,
-                        &LookupType::ById(rack_id),
-                    )
-                }
-                TxnError::Pool(e) => {
-                    Error::internal_error(&format!("Transaction error: {}", e))
-                }
-            })
-    }
-
-    /// Stores a new sled in the database.
-    pub async fn sled_upsert(&self, sled: Sled) -> CreateResult<Sled> {
-        use db::schema::sled::dsl;
-        diesel::insert_into(dsl::sled)
-            .values(sled.clone())
-            .on_conflict(dsl::id)
-            .do_update()
-            .set((
-                dsl::time_modified.eq(Utc::now()),
-                dsl::ip.eq(sled.ip),
-                dsl::port.eq(sled.port),
-            ))
-            .returning(Sled::as_returning())
-            .get_result_async(self.pool())
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Sled,
-                        &sled.id().to_string(),
-                    ),
-                )
-            })
-    }
-
-    pub async fn sled_list(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResultVec<Sled> {
-        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
-        use db::schema::sled::dsl;
-        paginated(dsl::sled, dsl::id, pagparams)
-            .select(Sled::as_select())
-            .load_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Stores a new zpool in the database.
-    pub async fn zpool_upsert(&self, zpool: Zpool) -> CreateResult<Zpool> {
-        use db::schema::zpool::dsl;
-
-        let sled_id = zpool.sled_id;
-        Sled::insert_resource(
-            sled_id,
-            diesel::insert_into(dsl::zpool)
-                .values(zpool.clone())
-                .on_conflict(dsl::id)
-                .do_update()
-                .set((
-                    dsl::time_modified.eq(Utc::now()),
-                    dsl::sled_id.eq(excluded(dsl::sled_id)),
-                    dsl::total_size.eq(excluded(dsl::total_size)),
-                )),
-        )
-        .insert_and_get_result_async(self.pool())
-        .await
-        .map_err(|e| match e {
-            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
-                type_name: ResourceType::Sled,
-                lookup_type: LookupType::ById(sled_id),
-            },
-            AsyncInsertError::DatabaseError(e) => {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Zpool,
-                        &zpool.id().to_string(),
-                    ),
-                )
-            }
-        })
-    }
-
-    /// Stores a new dataset in the database.
-    pub async fn dataset_upsert(
-        &self,
-        dataset: Dataset,
-    ) -> CreateResult<Dataset> {
-        use db::schema::dataset::dsl;
-
-        let zpool_id = dataset.pool_id;
-        Zpool::insert_resource(
-            zpool_id,
-            diesel::insert_into(dsl::dataset)
-                .values(dataset.clone())
-                .on_conflict(dsl::id)
-                .do_update()
-                .set((
-                    dsl::time_modified.eq(Utc::now()),
-                    dsl::pool_id.eq(excluded(dsl::pool_id)),
-                    dsl::ip.eq(excluded(dsl::ip)),
-                    dsl::port.eq(excluded(dsl::port)),
-                    dsl::kind.eq(excluded(dsl::kind)),
-                )),
-        )
-        .insert_and_get_result_async(self.pool())
-        .await
-        .map_err(|e| match e {
-            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
-                type_name: ResourceType::Zpool,
-                lookup_type: LookupType::ById(zpool_id),
-            },
-            AsyncInsertError::DatabaseError(e) => {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Dataset,
-                        &dataset.id().to_string(),
-                    ),
-                )
-            }
-        })
-    }
-
-    /// Stores a new service in the database.
-    pub async fn service_upsert(
-        &self,
-        opctx: &OpContext,
-        service: Service,
-    ) -> CreateResult<Service> {
-        use db::schema::service::dsl;
-
-        let sled_id = service.sled_id;
-        Sled::insert_resource(
-            sled_id,
-            diesel::insert_into(dsl::service)
-                .values(service.clone())
-                .on_conflict(dsl::id)
-                .do_update()
-                .set((
-                    dsl::time_modified.eq(Utc::now()),
-                    dsl::sled_id.eq(excluded(dsl::sled_id)),
-                    dsl::ip.eq(excluded(dsl::ip)),
-                    dsl::kind.eq(excluded(dsl::kind)),
-                )),
-        )
-        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| match e {
-            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
-                type_name: ResourceType::Sled,
-                lookup_type: LookupType::ById(sled_id),
-            },
-            AsyncInsertError::DatabaseError(e) => {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Service,
-                        &service.id().to_string(),
-                    ),
-                )
-            }
-        })
-    }
-
-    fn get_allocated_regions_query(
-        volume_id: Uuid,
-    ) -> impl RunnableQuery<(Dataset, Region)> {
-        use db::schema::dataset::dsl as dataset_dsl;
-        use db::schema::region::dsl as region_dsl;
-        region_dsl::region
-            .filter(region_dsl::volume_id.eq(volume_id))
-            .inner_join(
-                dataset_dsl::dataset
-                    .on(region_dsl::dataset_id.eq(dataset_dsl::id)),
-            )
-            .select((Dataset::as_select(), Region::as_select()))
-    }
-
-    /// Gets allocated regions for a disk, and the datasets to which those
-    /// regions belong.
-    ///
-    /// Note that this function does not validate liveness of the Disk, so it
-    /// may be used in a context where the disk is being deleted.
-    pub async fn get_allocated_regions(
-        &self,
-        volume_id: Uuid,
-    ) -> Result<Vec<(Dataset, Region)>, Error> {
-        Self::get_allocated_regions_query(volume_id)
-            .get_results_async::<(Dataset, Region)>(self.pool())
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    fn get_allocatable_datasets_query() -> impl RunnableQuery<Dataset> {
-        use db::schema::dataset::dsl;
-
-        dsl::dataset
-            // We look for valid datasets (non-deleted crucible datasets).
-            .filter(dsl::size_used.is_not_null())
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::kind.eq(DatasetKind::Crucible))
-            .order(dsl::size_used.asc())
-            // TODO: We admittedly don't actually *fail* any request for
-            // running out of space - we try to send the request down to
-            // crucible agents, and expect them to fail on our behalf in
-            // out-of-storage conditions. This should undoubtedly be
-            // handled more explicitly.
-            .select(Dataset::as_select())
-            .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap())
-    }
-
-    async fn get_block_size_from_disk_create(
-        &self,
-        opctx: &OpContext,
-        disk_create: &params::DiskCreate,
-    ) -> Result<db::model::BlockSize, Error> {
-        match &disk_create.disk_source {
-            params::DiskSource::Blank { block_size } => {
-                Ok(db::model::BlockSize::try_from(*block_size)
-                    .map_err(|e| Error::invalid_request(&e.to_string()))?)
-            }
-            params::DiskSource::Snapshot { snapshot_id: _ } => {
-                // Until we implement snapshots, do not allow disks to be
-                // created from a snapshot.
-                return Err(Error::InvalidValue {
-                    label: String::from("snapshot"),
-                    message: String::from("snapshots are not yet supported"),
-                });
-            }
-            params::DiskSource::Image { image_id: _ } => {
-                // Until we implement project images, do not allow disks to be
-                // created from a project image.
-                return Err(Error::InvalidValue {
-                    label: String::from("image"),
-                    message: String::from(
-                        "project images are not yet supported",
-                    ),
-                });
-            }
-            params::DiskSource::GlobalImage { image_id } => {
-                let (.., db_global_image) = LookupPath::new(opctx, &self)
-                    .global_image_id(*image_id)
-                    .fetch()
-                    .await?;
-
-                Ok(db_global_image.block_size)
-            }
-        }
-    }
-
-    /// Idempotently allocates enough regions to back a disk.
-    ///
-    /// Returns the allocated regions, as well as the datasets to which they
-    /// belong.
-    pub async fn region_allocate(
-        &self,
-        opctx: &OpContext,
-        volume_id: Uuid,
-        params: &params::DiskCreate,
-    ) -> Result<Vec<(Dataset, Region)>, Error> {
-        use db::schema::dataset::dsl as dataset_dsl;
-        use db::schema::region::dsl as region_dsl;
-
-        // ALLOCATION POLICY
-        //
-        // NOTE: This policy can - and should! - be changed.
-        //
-        // See https://rfd.shared.oxide.computer/rfd/0205 for a more
-        // complete discussion.
-        //
-        // It is currently acting as a placeholder, showing a feasible
-        // interaction between datasets and regions.
-        //
-        // This policy allocates regions to distinct Crucible datasets,
-        // favoring datasets with the smallest existing (summed) region
-        // sizes. Basically, "pick the datasets with the smallest load first".
-        //
-        // Longer-term, we should consider:
-        // - Storage size + remaining free space
-        // - Sled placement of datasets
-        // - What sort of loads we'd like to create (even split across all disks
-        //   may not be preferable, especially if maintenance is expected)
-        #[derive(Debug, thiserror::Error)]
-        enum RegionAllocateError {
-            #[error("Not enough datasets for replicated allocation: {0}")]
-            NotEnoughDatasets(usize),
-        }
-        type TxnError = TransactionError<RegionAllocateError>;
-
-        let params: params::DiskCreate = params.clone();
-        let block_size =
-            self.get_block_size_from_disk_create(opctx, &params).await?;
-        let blocks_per_extent =
-            params.extent_size() / block_size.to_bytes() as i64;
-
-        self.pool()
-            .transaction(move |conn| {
-                // First, for idempotency, check if regions are already
-                // allocated to this disk.
-                //
-                // If they are, return those regions and the associated
-                // datasets.
-                let datasets_and_regions =
-                    Self::get_allocated_regions_query(volume_id)
-                        .get_results::<(Dataset, Region)>(conn)?;
-                if !datasets_and_regions.is_empty() {
-                    return Ok(datasets_and_regions);
-                }
-
-                let mut datasets: Vec<Dataset> =
-                    Self::get_allocatable_datasets_query()
-                        .get_results::<Dataset>(conn)?;
-
-                if datasets.len() < REGION_REDUNDANCY_THRESHOLD {
-                    return Err(TxnError::CustomError(
-                        RegionAllocateError::NotEnoughDatasets(datasets.len()),
-                    ));
-                }
-
-                // Create identical regions on each of the following datasets.
-                let source_datasets =
-                    &mut datasets[0..REGION_REDUNDANCY_THRESHOLD];
-                let regions: Vec<Region> = source_datasets
-                    .iter()
-                    .map(|dataset| {
-                        Region::new(
-                            dataset.id(),
-                            volume_id,
-                            block_size.into(),
-                            blocks_per_extent,
-                            params.extent_count(),
-                        )
-                    })
-                    .collect();
-                let regions = diesel::insert_into(region_dsl::region)
-                    .values(regions)
-                    .returning(Region::as_returning())
-                    .get_results(conn)?;
-
-                // Update the tallied sizes in the source datasets containing
-                // those regions.
-                let region_size = i64::from(block_size.to_bytes())
-                    * blocks_per_extent
-                    * params.extent_count();
-                for dataset in source_datasets.iter_mut() {
-                    dataset.size_used =
-                        dataset.size_used.map(|v| v + region_size);
-                }
-
-                let dataset_ids: Vec<Uuid> =
-                    source_datasets.iter().map(|ds| ds.id()).collect();
-                diesel::update(dataset_dsl::dataset)
-                    .filter(dataset_dsl::id.eq_any(dataset_ids))
-                    .set(
-                        dataset_dsl::size_used
-                            .eq(dataset_dsl::size_used + region_size),
-                    )
-                    .execute(conn)?;
-
-                // Return the regions with the datasets to which they were allocated.
-                Ok(source_datasets
-                    .into_iter()
-                    .map(|d| d.clone())
-                    .zip(regions)
-                    .collect())
-            })
-            .await
-            .map_err(|e| match e {
-                TxnError::CustomError(
-                    RegionAllocateError::NotEnoughDatasets(_),
-                ) => Error::unavail("Not enough datasets to allocate disks"),
-                _ => {
-                    Error::internal_error(&format!("Transaction error: {}", e))
-                }
-            })
-    }
-
-    /// Deletes all regions backing a disk.
-    ///
-    /// Also updates the storage usage on their corresponding datasets.
-    pub async fn regions_hard_delete(&self, volume_id: Uuid) -> DeleteResult {
-        use db::schema::dataset::dsl as dataset_dsl;
-        use db::schema::region::dsl as region_dsl;
-
-        // Remove the regions, collecting the datasets they're from.
-        let (dataset_id, size) = diesel::delete(region_dsl::region)
-            .filter(region_dsl::volume_id.eq(volume_id))
-            .returning((
-                region_dsl::dataset_id,
-                region_dsl::block_size
-                    * region_dsl::blocks_per_extent
-                    * region_dsl::extent_count,
-            ))
-            .get_result_async::<(Uuid, i64)>(self.pool())
-            .await
-            .map_err(|e| {
-                Error::internal_error(&format!(
-                    "error deleting regions: {:?}",
-                    e
-                ))
-            })?;
-
-        // Update those datasets to which the regions belonged.
-        diesel::update(dataset_dsl::dataset)
-            .filter(dataset_dsl::id.eq(dataset_id))
-            .set(dataset_dsl::size_used.eq(dataset_dsl::size_used - size))
-            .execute_async(self.pool())
-            .await
-            .map_err(|e| {
-                Error::internal_error(&format!(
-                    "error updating dataset space: {:?}",
-                    e
-                ))
-            })?;
-
-        Ok(())
-    }
-
-    pub async fn volume_create(&self, volume: Volume) -> CreateResult<Volume> {
-        use db::schema::volume::dsl;
-
-        diesel::insert_into(dsl::volume)
-            .values(volume.clone())
-            .on_conflict(dsl::id)
-            .do_nothing()
-            .returning(Volume::as_returning())
-            .get_result_async(self.pool())
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Volume,
-                        volume.id().to_string().as_str(),
-                    ),
-                )
-            })
-    }
-
-    pub async fn volume_delete(&self, volume_id: Uuid) -> DeleteResult {
-        use db::schema::volume::dsl;
-
-        let now = Utc::now();
-        diesel::update(dsl::volume)
-            .filter(dsl::id.eq(volume_id))
-            .set(dsl::time_deleted.eq(now))
-            .check_if_exists::<Volume>(volume_id)
-            .execute_and_check(self.pool())
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByLookup(
-                        ResourceType::Volume,
-                        LookupType::ById(volume_id),
-                    ),
-                )
-            })?;
-        Ok(())
-    }
-
-    pub async fn volume_get(&self, volume_id: Uuid) -> LookupResult<Volume> {
-        use db::schema::volume::dsl;
-
-        dsl::volume
-            .filter(dsl::id.eq(volume_id))
-            .select(Volume::as_select())
-            .get_result_async(self.pool())
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Create an organization
-    pub async fn organization_create(
-        &self,
-        opctx: &OpContext,
-        organization: &params::OrganizationCreate,
-    ) -> CreateResult<Organization> {
-        let authz_silo = opctx
-            .authn
-            .silo_required()
-            .internal_context("creating an Organization")?;
-        opctx.authorize(authz::Action::CreateChild, &authz_silo).await?;
-
-        use db::schema::organization::dsl;
-        let silo_id = authz_silo.id();
-        let organization = Organization::new(organization.clone(), silo_id);
-        let name = organization.name().as_str().to_string();
-
-        Silo::insert_resource(
-            silo_id,
-            diesel::insert_into(dsl::organization).values(organization),
-        )
-        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| match e {
-            AsyncInsertError::CollectionNotFound => Error::InternalError {
-                internal_message: format!(
-                    "attempting to create an \
-                     organization under non-existent silo {}",
-                    silo_id
-                ),
-            },
-            AsyncInsertError::DatabaseError(e) => {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(ResourceType::Organization, &name),
-                )
-            }
-        })
-    }
-
-    /// Delete an organization
-    pub async fn organization_delete(
-        &self,
-        opctx: &OpContext,
-        authz_org: &authz::Organization,
-        db_org: &db::model::Organization,
-    ) -> DeleteResult {
-        opctx.authorize(authz::Action::Delete, authz_org).await?;
-
-        use db::schema::organization::dsl;
-        use db::schema::project;
-
-        // Make sure there are no projects present within this organization.
-        let project_found = diesel_pool_result_optional(
-            project::dsl::project
-                .filter(project::dsl::organization_id.eq(authz_org.id()))
-                .filter(project::dsl::time_deleted.is_null())
-                .select(project::dsl::id)
-                .limit(1)
-                .first_async::<Uuid>(self.pool_authorized(opctx).await?)
-                .await,
-        )
-        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
-        if project_found.is_some() {
-            return Err(Error::InvalidRequest {
-                message: "organization to be deleted contains a project"
-                    .to_string(),
-            });
-        }
-
-        let now = Utc::now();
-        let updated_rows = diesel::update(dsl::organization)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(authz_org.id()))
-            .filter(dsl::rcgen.eq(db_org.rcgen))
-            .set(dsl::time_deleted.eq(now))
-            .execute_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_org),
-                )
-            })?;
-
-        if updated_rows == 0 {
-            return Err(Error::InvalidRequest {
-                message: "deletion failed due to concurrent modification"
-                    .to_string(),
-            });
-        }
-        Ok(())
-    }
-
-    pub async fn organizations_list_by_id(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResultVec<Organization> {
-        let authz_silo = opctx
-            .authn
-            .silo_required()
-            .internal_context("listing Organizations")?;
-        opctx.authorize(authz::Action::ListChildren, &authz_silo).await?;
-
-        use db::schema::organization::dsl;
-        paginated(dsl::organization, dsl::id, pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::silo_id.eq(authz_silo.id()))
-            .select(Organization::as_select())
-            .load_async::<Organization>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    pub async fn organizations_list_by_name(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<Organization> {
-        let authz_silo = opctx
-            .authn
-            .silo_required()
-            .internal_context("listing Organizations")?;
-        opctx.authorize(authz::Action::ListChildren, &authz_silo).await?;
-
-        use db::schema::organization::dsl;
-        paginated(dsl::organization, dsl::name, pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::silo_id.eq(authz_silo.id()))
-            .select(Organization::as_select())
-            .load_async::<Organization>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Updates an organization by name (clobbering update -- no etag)
-    pub async fn organization_update(
-        &self,
-        opctx: &OpContext,
-        authz_org: &authz::Organization,
-        updates: OrganizationUpdate,
-    ) -> UpdateResult<Organization> {
-        use db::schema::organization::dsl;
-
-        opctx.authorize(authz::Action::Modify, authz_org).await?;
-        diesel::update(dsl::organization)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(authz_org.id()))
-            .set(updates)
-            .returning(Organization::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_org),
-                )
-            })
-    }
-
-    /// Create a project
-    pub async fn project_create(
-        &self,
-        opctx: &OpContext,
-        org: &authz::Organization,
-        project: Project,
-    ) -> CreateResult<Project> {
-        use db::schema::project::dsl;
-
-        opctx.authorize(authz::Action::CreateChild, org).await?;
-
-        let name = project.name().as_str().to_string();
-        let organization_id = project.organization_id;
-        Organization::insert_resource(
-            organization_id,
-            diesel::insert_into(dsl::project).values(project),
-        )
-        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| match e {
-            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
-                type_name: ResourceType::Organization,
-                lookup_type: LookupType::ById(organization_id),
-            },
-            AsyncInsertError::DatabaseError(e) => {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(ResourceType::Project, &name),
-                )
-            }
-        })
-    }
-
-    /// Delete a project
-    // TODO-correctness This needs to check whether there are any resources that
-    // depend on the Project (Disks, Instances). We can do this with a
-    // generation counter that gets bumped when these resources are created.
-    pub async fn project_delete(
-        &self,
-        opctx: &OpContext,
-        authz_project: &authz::Project,
-    ) -> DeleteResult {
-        opctx.authorize(authz::Action::Delete, authz_project).await?;
-
-        use db::schema::project::dsl;
-
-        let now = Utc::now();
-        diesel::update(dsl::project)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(authz_project.id()))
-            .set(dsl::time_deleted.eq(now))
-            .returning(Project::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_project),
-                )
-            })?;
-        Ok(())
-    }
-
-    pub async fn projects_list_by_id(
-        &self,
-        opctx: &OpContext,
-        authz_org: &authz::Organization,
-        pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResultVec<Project> {
-        use db::schema::project::dsl;
-
-        opctx.authorize(authz::Action::ListChildren, authz_org).await?;
-
-        paginated(dsl::project, dsl::id, pagparams)
-            .filter(dsl::organization_id.eq(authz_org.id()))
-            .filter(dsl::time_deleted.is_null())
-            .select(Project::as_select())
-            .load_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    pub async fn projects_list_by_name(
-        &self,
-        opctx: &OpContext,
-        authz_org: &authz::Organization,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<Project> {
-        use db::schema::project::dsl;
-
-        opctx.authorize(authz::Action::ListChildren, authz_org).await?;
-
-        paginated(dsl::project, dsl::name, &pagparams)
-            .filter(dsl::organization_id.eq(authz_org.id()))
-            .filter(dsl::time_deleted.is_null())
-            .select(Project::as_select())
-            .load_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Updates a project (clobbering update -- no etag)
-    pub async fn project_update(
-        &self,
-        opctx: &OpContext,
-        authz_project: &authz::Project,
-        updates: ProjectUpdate,
-    ) -> UpdateResult<Project> {
-        opctx.authorize(authz::Action::Modify, authz_project).await?;
-
-        use db::schema::project::dsl;
-        diesel::update(dsl::project)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(authz_project.id()))
-            .set(updates)
-            .returning(Project::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_project),
-                )
-            })
-    }
-
-    // IP Pools
-
-    /// List IP Pools by their name
-    pub async fn ip_pools_list_by_name(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<IpPool> {
-        use db::schema::ip_pool::dsl;
-        opctx
-            .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST)
-            .await?;
-        paginated(dsl::ip_pool, dsl::name, pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .select(db::model::IpPool::as_select())
-            .get_results_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// List IP Pools by their IDs
-    pub async fn ip_pools_list_by_id(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Uuid>,
-    ) -> ListResultVec<IpPool> {
-        use db::schema::ip_pool::dsl;
-        opctx
-            .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST)
-            .await?;
-        paginated(dsl::ip_pool, dsl::id, pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .select(db::model::IpPool::as_select())
-            .get_results_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    pub async fn ip_pool_create(
-        &self,
-        opctx: &OpContext,
-        new_pool: &params::IpPoolCreate,
-    ) -> CreateResult<IpPool> {
-        use db::schema::ip_pool::dsl;
-        opctx
-            .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
-            .await?;
-        let pool = IpPool::new(&new_pool.identity);
-        let pool_name = pool.name().as_str().to_string();
-        diesel::insert_into(dsl::ip_pool)
-            .values(pool)
-            .returning(IpPool::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(ResourceType::IpPool, &pool_name),
-                )
-            })
-    }
-
-    pub async fn ip_pool_delete(
-        &self,
-        opctx: &OpContext,
-        authz_pool: &authz::IpPool,
-        db_pool: &IpPool,
-    ) -> DeleteResult {
-        use db::schema::ip_pool::dsl;
-        use db::schema::ip_pool_range;
-        opctx.authorize(authz::Action::Delete, authz_pool).await?;
-
-        // Verify there are no IP ranges still in this pool
-        let range = diesel_pool_result_optional(
-            ip_pool_range::dsl::ip_pool_range
-                .filter(ip_pool_range::dsl::ip_pool_id.eq(authz_pool.id()))
-                .filter(ip_pool_range::dsl::time_deleted.is_null())
-                .select(ip_pool_range::dsl::id)
-                .limit(1)
-                .first_async::<Uuid>(self.pool_authorized(opctx).await?)
-                .await,
-        )
-        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
-        if range.is_some() {
-            return Err(Error::InvalidRequest {
-                message:
-                    "IP Pool cannot be deleted while it contains IP ranges"
-                        .to_string(),
-            });
-        }
-
-        // Delete the pool, conditional on the rcgen not having changed. This
-        // protects the delete from occurring if clients created a new IP range
-        // in between the above check for children and this query.
-        let now = Utc::now();
-        let updated_rows = diesel::update(dsl::ip_pool)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(authz_pool.id()))
-            .filter(dsl::rcgen.eq(db_pool.rcgen))
-            .set(dsl::time_deleted.eq(now))
-            .execute_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_pool),
-                )
-            })?;
-
-        if updated_rows == 0 {
-            return Err(Error::InvalidRequest {
-                message: "deletion failed due to concurrent modification"
-                    .to_string(),
-            });
-        }
-        Ok(())
-    }
-
-    pub async fn ip_pool_update(
-        &self,
-        opctx: &OpContext,
-        authz_pool: &authz::IpPool,
-        updates: IpPoolUpdate,
-    ) -> UpdateResult<IpPool> {
-        use db::schema::ip_pool::dsl;
-        opctx.authorize(authz::Action::Modify, authz_pool).await?;
-        diesel::update(dsl::ip_pool)
-            .filter(dsl::id.eq(authz_pool.id()))
-            .filter(dsl::time_deleted.is_null())
-            .set(updates)
-            .returning(IpPool::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_pool),
-                )
-            })
-    }
-
-    pub async fn ip_pool_list_ranges(
-        &self,
-        opctx: &OpContext,
-        authz_pool: &authz::IpPool,
-        pag_params: &DataPageParams<'_, IpNetwork>,
-    ) -> ListResultVec<IpPoolRange> {
-        use db::schema::ip_pool_range::dsl;
-        opctx.authorize(authz::Action::ListChildren, authz_pool).await?;
-        paginated(dsl::ip_pool_range, dsl::first_address, pag_params)
-            .filter(dsl::ip_pool_id.eq(authz_pool.id()))
-            .filter(dsl::time_deleted.is_null())
-            .select(IpPoolRange::as_select())
-            .get_results_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_pool),
-                )
-            })
-    }
-
-    pub async fn ip_pool_add_range(
-        &self,
-        opctx: &OpContext,
-        authz_pool: &authz::IpPool,
-        range: &IpRange,
-    ) -> CreateResult<IpPoolRange> {
-        use db::schema::ip_pool_range::dsl;
-        opctx.authorize(authz::Action::CreateChild, authz_pool).await?;
-        let pool_id = authz_pool.id();
-        let new_range = IpPoolRange::new(range, pool_id);
-        let filter_subquery = FilterOverlappingIpRanges { range: new_range };
-        let insert_query =
-            diesel::insert_into(dsl::ip_pool_range).values(filter_subquery);
-        IpPool::insert_resource(pool_id, insert_query)
-            .insert_and_get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                use async_bb8_diesel::ConnectionError::Query;
-                use async_bb8_diesel::PoolError::Connection;
-                use diesel::result::Error::NotFound;
-
-                match e {
-                    AsyncInsertError::DatabaseError(Connection(Query(
-                        NotFound,
-                    ))) => {
-                        // We've filtered out the IP addresses the client provided,
-                        // i.e., there's some overlap with existing addresses.
-                        Error::invalid_request(
-                            format!(
-                                "The provided IP range {}-{} overlaps with \
-                                 an existing range",
-                                range.first_address(),
-                                range.last_address(),
-                            )
-                            .as_str(),
-                        )
-                    }
-                    AsyncInsertError::CollectionNotFound => {
-                        Error::ObjectNotFound {
-                            type_name: ResourceType::IpPool,
-                            lookup_type: LookupType::ById(pool_id),
-                        }
-                    }
-                    AsyncInsertError::DatabaseError(err) => {
-                        public_error_from_diesel_pool(err, ErrorHandler::Server)
-                    }
-                }
-            })
-    }
-
-    pub async fn ip_pool_delete_range(
-        &self,
-        opctx: &OpContext,
-        authz_pool: &authz::IpPool,
-        range: &IpRange,
-    ) -> DeleteResult {
-        use db::schema::instance_external_ip;
-        use db::schema::ip_pool_range::dsl;
-        opctx.authorize(authz::Action::Modify, authz_pool).await?;
-
-        let pool_id = authz_pool.id();
-        let first_address = range.first_address();
-        let last_address = range.last_address();
-        let first_net = ipnetwork::IpNetwork::from(first_address);
-        let last_net = ipnetwork::IpNetwork::from(last_address);
-
-        // Fetch the range itself, if it exists. We'll need to protect against
-        // concurrent inserts of new external IPs from the target range by
-        // comparing the rcgen.
-        let range = diesel_pool_result_optional(
-            dsl::ip_pool_range
-                .filter(dsl::ip_pool_id.eq(pool_id))
-                .filter(dsl::first_address.eq(first_net))
-                .filter(dsl::last_address.eq(last_net))
-                .filter(dsl::time_deleted.is_null())
-                .select(IpPoolRange::as_select())
-                .get_result_async::<IpPoolRange>(
-                    self.pool_authorized(opctx).await?,
-                )
-                .await,
-        )
-        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?
-        .ok_or_else(|| {
-            Error::invalid_request(
-                format!(
-                    "The provided range {}-{} does not exist",
-                    first_address, last_address,
-                )
-                .as_str(),
-            )
-        })?;
-
-        // Find external IPs allocated out of this pool and range.
-        let range_id = range.id;
-        let has_children = diesel::dsl::select(diesel::dsl::exists(
-            instance_external_ip::table
-                .filter(instance_external_ip::dsl::ip_pool_id.eq(pool_id))
-                .filter(
-                    instance_external_ip::dsl::ip_pool_range_id.eq(range_id),
-                )
-                .filter(instance_external_ip::dsl::time_deleted.is_null()),
-        ))
-        .get_result_async::<bool>(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
-        if has_children {
-            return Err(Error::invalid_request(
-                "IP pool ranges cannot be deleted while \
-                 external IP addresses are allocated from them",
-            ));
-        }
-
-        // Delete the range, conditional on the rcgen not having changed. This
-        // protects the delete from occurring if clients allocated a new external
-        // IP address in between the above check for children and this query.
-        let rcgen = range.rcgen;
-        let now = Utc::now();
-        let updated_rows = diesel::update(
-            dsl::ip_pool_range
-                .find(range_id)
-                .filter(dsl::time_deleted.is_null())
-                .filter(dsl::rcgen.eq(rcgen)),
-        )
-        .set(dsl::time_deleted.eq(now))
-        .execute_async(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
-        if updated_rows == 1 {
-            Ok(())
-        } else {
-            Err(Error::invalid_request(
-                "IP range deletion failed due to concurrent modification",
-            ))
-        }
-    }
-
-    // External IP addresses
-
-    /// Create an external IP address for an instance.
-    // TODO-correctness: This should be made idempotent.
-    pub async fn allocate_instance_external_ip(
-        &self,
-        opctx: &OpContext,
-        instance_id: Uuid,
-    ) -> CreateResult<InstanceExternalIp> {
-        let query =
-            NextExternalIp::new(IncompleteInstanceExternalIp::new(instance_id));
-        query
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                use async_bb8_diesel::ConnectionError::Query;
-                use async_bb8_diesel::PoolError::Connection;
-                use diesel::result::Error::NotFound;
-                match e {
-                    Connection(Query(NotFound)) => Error::invalid_request(
-                        "No external IP addresses available for new instance",
-                    ),
-                    _ => public_error_from_diesel_pool(e, ErrorHandler::Server),
-                }
-            })
-    }
-
-    /// Deallocate the external IP address with the provided ID.
-    ///
-    /// To support idempotency, such as in saga operations, this method returns
-    /// an extra boolean, rather than the usual `DeleteResult`. The meanings of
-    /// the return values are:
-    ///
-    /// - `Ok(true)`: The record was deleted during this call
-    /// - `Ok(false)`: The record was already deleted, such as by a previous
-    ///   call
-    /// - `Err(_)`: Any other condition, including a non-existent record.
-    pub async fn deallocate_instance_external_ip(
-        &self,
-        opctx: &OpContext,
-        ip_id: Uuid,
-    ) -> Result<bool, Error> {
-        use db::schema::instance_external_ip::dsl;
-        let now = Utc::now();
-        diesel::update(dsl::instance_external_ip)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(ip_id))
-            .set(dsl::time_deleted.eq(now))
-            .check_if_exists::<InstanceExternalIp>(ip_id)
-            .execute_and_check(self.pool_authorized(opctx).await?)
-            .await
-            .map(|r| match r.status {
-                UpdateStatus::Updated => true,
-                UpdateStatus::NotUpdatedButExists => false,
-            })
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Delete all external IP addresses associated with the provided instance
-    /// ID.
-    // TODO-correctness: This should be made idempotent.
-    pub async fn deallocate_instance_external_ip_by_instance_id(
-        &self,
-        opctx: &OpContext,
-        instance_id: Uuid,
-    ) -> DeleteResult {
-        use db::schema::instance_external_ip::dsl;
-        let now = Utc::now();
-        diesel::update(dsl::instance_external_ip)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::instance_id.eq(instance_id))
-            .set(dsl::time_deleted.eq(now))
-            .execute_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(e, ErrorHandler::Server)
-            })?;
-        Ok(())
-    }
-
-    pub async fn instance_lookup_external_ip(
-        &self,
-        opctx: &OpContext,
-        instance_id: Uuid,
-    ) -> LookupResult<InstanceExternalIp> {
-        use db::schema::instance_external_ip::dsl;
-        dsl::instance_external_ip
-            .filter(dsl::instance_id.eq(instance_id))
-            .filter(dsl::time_deleted.is_null())
-            .select(InstanceExternalIp::as_select())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    // Instances
-
-    /// Idempotently insert a database record for an Instance
-    ///
-    /// This is intended to be used by a saga action. When we say this is
-    /// idempotent, we mean that if this function succeeds and the caller
-    /// invokes it again with the same instance id, project id, creation
-    /// parameters, and initial runtime, then this operation will succeed and
-    /// return the current object in the database. Because this is intended for
-    /// use by sagas, we do assume that if the record exists, it should still be
-    /// in the "Creating" state. If it's in any other state, this function will
-    /// return with an error on the assumption that we don't really know what's
-    /// happened or how to proceed.
-    ///
-    /// ## Errors
-    ///
-    /// In addition to the usual database errors (e.g., no connections
-    /// available), this function can fail if there is already a different
-    /// instance (having a different id) with the same name in the same project.
-    // TODO-design Given that this is really oriented towards the saga
-    // interface, one wonders if it's even worth having an abstraction here, or
-    // if sagas shouldn't directly work with the database here (i.e., just do
-    // what this function does under the hood).
-    pub async fn project_create_instance(
-        &self,
-        instance: Instance,
-    ) -> CreateResult<Instance> {
-        use db::schema::instance::dsl;
-
-        let gen = instance.runtime().gen;
-        let name = instance.name().clone();
-        let instance: Instance = diesel::insert_into(dsl::instance)
-            .values(instance)
-            .on_conflict(dsl::id)
-            .do_nothing()
-            .returning(Instance::as_returning())
-            .get_result_async(self.pool())
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Instance,
-                        name.as_str(),
-                    ),
-                )
-            })?;
-
-        bail_unless!(
-            instance.runtime().state.state()
-                == &api::external::InstanceState::Creating,
-            "newly-created Instance has unexpected state: {:?}",
-            instance.runtime().state
-        );
-        bail_unless!(
-            instance.runtime().gen == gen,
-            "newly-created Instance has unexpected generation: {:?}",
-            instance.runtime().gen
-        );
-        Ok(instance)
-    }
-
-    pub async fn project_list_instances(
-        &self,
-        opctx: &OpContext,
-        authz_project: &authz::Project,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<Instance> {
-        opctx.authorize(authz::Action::ListChildren, authz_project).await?;
-
-        use db::schema::instance::dsl;
-        paginated(dsl::instance, dsl::name, &pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::project_id.eq(authz_project.id()))
-            .select(Instance::as_select())
-            .load_async::<Instance>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Fetches information about an Instance that the caller has previously
-    /// fetched
-    ///
-    /// See disk_refetch().
-    pub async fn instance_refetch(
-        &self,
-        opctx: &OpContext,
-        authz_instance: &authz::Instance,
-    ) -> LookupResult<Instance> {
-        let (.., db_instance) = LookupPath::new(opctx, self)
-            .instance_id(authz_instance.id())
-            .fetch()
-            .await
-            .map_err(|e| match e {
-                // Use the "not found" message of the authz object we were
-                // given, which will reflect however the caller originally
-                // looked it up.
-                Error::ObjectNotFound { .. } => authz_instance.not_found(),
-                e => e,
-            })?;
-        Ok(db_instance)
-    }
-
-    // TODO-design It's tempting to return the updated state of the Instance
-    // here because it's convenient for consumers and by using a RETURNING
-    // clause, we could ensure that the "update" and "fetch" are atomic.
-    // But in the unusual case that we _don't_ update the row because our
-    // update is older than the one in the database, we would have to fetch
-    // the current state explicitly. For now, we'll just require consumers
-    // to explicitly fetch the state if they want that.
-    pub async fn instance_update_runtime(
-        &self,
-        instance_id: &Uuid,
-        new_runtime: &InstanceRuntimeState,
-    ) -> Result<bool, Error> {
-        use db::schema::instance::dsl;
-
-        let updated = diesel::update(dsl::instance)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(*instance_id))
-            .filter(dsl::state_generation.lt(new_runtime.gen))
-            .filter(
-                dsl::migration_id
-                    .is_null()
-                    .or(dsl::target_propolis_id.eq(new_runtime.propolis_id)),
-            )
-            .set(new_runtime.clone())
-            .check_if_exists::<Instance>(*instance_id)
-            .execute_and_check(self.pool())
-            .await
-            .map(|r| match r.status {
-                UpdateStatus::Updated => true,
-                UpdateStatus::NotUpdatedButExists => false,
-            })
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByLookup(
-                        ResourceType::Instance,
-                        LookupType::ById(*instance_id),
-                    ),
-                )
-            })?;
-
-        Ok(updated)
-    }
-
-    pub async fn project_delete_instance(
-        &self,
-        opctx: &OpContext,
-        authz_instance: &authz::Instance,
-    ) -> DeleteResult {
-        opctx.authorize(authz::Action::Delete, authz_instance).await?;
-
-        // This is subject to change, but for now we're going to say that an
-        // instance must be "stopped" or "failed" in order to delete it. The
-        // delete operation sets "time_deleted" (just like with other objects)
-        // and also sets the state to "destroyed".
-        use api::external::InstanceState as ApiInstanceState;
-        use db::model::InstanceState as DbInstanceState;
-        use db::schema::{disk, instance};
-
-        let stopped = DbInstanceState::new(ApiInstanceState::Stopped);
-        let failed = DbInstanceState::new(ApiInstanceState::Failed);
-        let destroyed = DbInstanceState::new(ApiInstanceState::Destroyed);
-        let ok_to_delete_instance_states = vec![stopped, failed];
-
-        let detached_label = api::external::DiskState::Detached.label();
-        let ok_to_detach_disk_states =
-            vec![api::external::DiskState::Attached(authz_instance.id())];
-        let ok_to_detach_disk_state_labels: Vec<_> =
-            ok_to_detach_disk_states.iter().map(|s| s.label()).collect();
-
-        let _instance = Instance::detach_resources(
-            authz_instance.id(),
-            instance::table.into_boxed().filter(
-                instance::dsl::state.eq_any(ok_to_delete_instance_states),
-            ),
-            disk::table.into_boxed().filter(
-                disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels),
-            ),
-            diesel::update(instance::dsl::instance).set((
-                instance::dsl::state.eq(destroyed),
-                instance::dsl::time_deleted.eq(Utc::now()),
-            )),
-            diesel::update(disk::dsl::disk).set((
-                disk::dsl::disk_state.eq(detached_label),
-                disk::dsl::attach_instance_id.eq(Option::<Uuid>::None),
-            )),
-        )
-        .detach_and_get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| match e {
-            DetachManyError::CollectionNotFound => Error::not_found_by_id(
-                ResourceType::Instance,
-                &authz_instance.id(),
-            ),
-            DetachManyError::NoUpdate { collection } => {
-                let instance_state = collection.runtime_state.state.state();
-                match instance_state {
-                    api::external::InstanceState::Stopped
-                    | api::external::InstanceState::Failed => {
-                        Error::internal_error("cannot delete instance")
-                    }
-                    _ => Error::invalid_request(&format!(
-                        "instance cannot be deleted in state \"{}\"",
-                        instance_state,
-                    )),
-                }
-            }
-            DetachManyError::DatabaseError(e) => {
-                public_error_from_diesel_pool(e, ErrorHandler::Server)
-            }
-        })?;
-
-        Ok(())
-    }
-
-    // Disks
-
-    /// List disks associated with a given instance.
-    pub async fn instance_list_disks(
-        &self,
-        opctx: &OpContext,
-        authz_instance: &authz::Instance,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<Disk> {
-        use db::schema::disk::dsl;
-
-        opctx.authorize(authz::Action::ListChildren, authz_instance).await?;
-
-        paginated(dsl::disk, dsl::name, &pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::attach_instance_id.eq(authz_instance.id()))
-            .select(Disk::as_select())
-            .load_async::<Disk>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    pub async fn project_create_disk(&self, disk: Disk) -> CreateResult<Disk> {
-        use db::schema::disk::dsl;
-
-        let gen = disk.runtime().gen;
-        let name = disk.name().clone();
-        let disk: Disk = diesel::insert_into(dsl::disk)
-            .values(disk)
-            .on_conflict(dsl::id)
-            .do_nothing()
-            .returning(Disk::as_returning())
-            .get_result_async(self.pool())
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(ResourceType::Disk, name.as_str()),
-                )
-            })?;
-
-        let runtime = disk.runtime();
-        bail_unless!(
-            runtime.state().state() == &api::external::DiskState::Creating,
-            "newly-created Disk has unexpected state: {:?}",
-            runtime.disk_state
-        );
-        bail_unless!(
-            runtime.gen == gen,
-            "newly-created Disk has unexpected generation: {:?}",
-            runtime.gen
-        );
-        Ok(disk)
-    }
-
-    pub async fn project_list_disks(
-        &self,
-        opctx: &OpContext,
-        authz_project: &authz::Project,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<Disk> {
-        opctx.authorize(authz::Action::ListChildren, authz_project).await?;
-
-        use db::schema::disk::dsl;
-        paginated(dsl::disk, dsl::name, &pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .filter(dsl::project_id.eq(authz_project.id()))
-            .select(Disk::as_select())
-            .load_async::<Disk>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Attaches a disk to an instance, if both objects:
-    /// - Exist
-    /// - Are in valid states
-    /// - Are under the maximum "attach count" threshold
-    pub async fn instance_attach_disk(
-        &self,
-        opctx: &OpContext,
-        authz_instance: &authz::Instance,
-        authz_disk: &authz::Disk,
-        max_disks: u32,
-    ) -> Result<(Instance, Disk), Error> {
-        use db::schema::{disk, instance};
-
-        opctx.authorize(authz::Action::Modify, authz_instance).await?;
-        opctx.authorize(authz::Action::Modify, authz_disk).await?;
-
-        let ok_to_attach_disk_states = vec![
-            api::external::DiskState::Creating,
-            api::external::DiskState::Detached,
-        ];
-        let ok_to_attach_disk_state_labels: Vec<_> =
-            ok_to_attach_disk_states.iter().map(|s| s.label()).collect();
-
-        // TODO(https://github.com/oxidecomputer/omicron/issues/811):
-        // This list of instance attach states is more restrictive than it
-        // plausibly could be.
-        //
-        // We currently only permit attaching disks to stopped instances.
-        let ok_to_attach_instance_states = vec![
-            db::model::InstanceState(api::external::InstanceState::Creating),
-            db::model::InstanceState(api::external::InstanceState::Stopped),
-        ];
-
-        let attached_label =
-            api::external::DiskState::Attached(authz_instance.id()).label();
-
-        let (instance, disk) = Instance::attach_resource(
-            authz_instance.id(),
-            authz_disk.id(),
-            instance::table
-                .into_boxed()
-                .filter(instance::dsl::state.eq_any(ok_to_attach_instance_states)),
-            disk::table
-                .into_boxed()
-                .filter(disk::dsl::disk_state.eq_any(ok_to_attach_disk_state_labels)),
-            max_disks,
-            diesel::update(disk::dsl::disk)
-                .set((
-                    disk::dsl::disk_state.eq(attached_label),
-                    disk::dsl::attach_instance_id.eq(authz_instance.id())
-                ))
-        )
-        .attach_and_get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .or_else(|e| {
-            match e {
-                AttachError::CollectionNotFound => {
-                    Err(Error::not_found_by_id(
-                        ResourceType::Instance,
-                        &authz_instance.id(),
-                    ))
-                },
-                AttachError::ResourceNotFound => {
-                    Err(Error::not_found_by_id(
-                        ResourceType::Disk,
-                        &authz_disk.id(),
-                    ))
-                },
-                AttachError::NoUpdate { attached_count, resource, collection } => {
-                    let disk_state = resource.state().into();
-                    match disk_state {
-                        // Idempotent errors: We did not perform an update,
-                        // because we're already in the process of attaching.
-                        api::external::DiskState::Attached(id) if id == authz_instance.id() => {
-                            return Ok((collection, resource));
-                        }
-                        api::external::DiskState::Attaching(id) if id == authz_instance.id() => {
-                            return Ok((collection, resource));
-                        }
-                        // Ok-to-attach disk states: Inspect the state to infer
-                        // why we did not attach.
-                        api::external::DiskState::Creating |
-                        api::external::DiskState::Detached => {
-                            match collection.runtime_state.state.state() {
-                                // Ok-to-be-attached instance states:
-                                api::external::InstanceState::Creating |
-                                api::external::InstanceState::Stopped => {
-                                    // The disk is ready to be attached, and the
-                                    // instance is ready to be attached. Perhaps
-                                    // we are at attachment capacity?
-                                    if attached_count == i64::from(max_disks) {
-                                        return Err(Error::invalid_request(&format!(
-                                            "cannot attach more than {} disks to instance",
-                                            max_disks
-                                        )));
-                                    }
-
-                                    // We can't attach, but the error hasn't
-                                    // helped us infer why.
-                                    return Err(Error::internal_error(
-                                        "cannot attach disk"
-                                    ));
-                                }
-                                // Not okay-to-be-attached instance states:
-                                _ => {
-                                    Err(Error::invalid_request(&format!(
-                                        "cannot attach disk to instance in {} state",
-                                        collection.runtime_state.state.state(),
-                                    )))
-                                }
-                            }
-                        },
-                        // Not-okay-to-attach disk states: The disk is attached elsewhere.
-                        api::external::DiskState::Attached(_) |
-                        api::external::DiskState::Attaching(_) |
-                        api::external::DiskState::Detaching(_) => {
-                            Err(Error::invalid_request(&format!(
-                                "cannot attach disk \"{}\": disk is attached to another instance",
-                                resource.name().as_str(),
-                            )))
-                        }
-                        _ => {
-                            Err(Error::invalid_request(&format!(
-                                "cannot attach disk \"{}\": invalid state {}",
-                                resource.name().as_str(),
-                                disk_state,
-                            )))
-                        }
-                    }
-                },
-                AttachError::DatabaseError(e) => {
-                    Err(public_error_from_diesel_pool(e, ErrorHandler::Server))
-                },
-            }
-        })?;
-
-        Ok((instance, disk))
-    }
-
-    pub async fn instance_detach_disk(
-        &self,
-        opctx: &OpContext,
-        authz_instance: &authz::Instance,
-        authz_disk: &authz::Disk,
-    ) -> Result<Disk, Error> {
-        use db::schema::{disk, instance};
-
-        opctx.authorize(authz::Action::Modify, authz_instance).await?;
-        opctx.authorize(authz::Action::Modify, authz_disk).await?;
-
-        let ok_to_detach_disk_states =
-            vec![api::external::DiskState::Attached(authz_instance.id())];
-        let ok_to_detach_disk_state_labels: Vec<_> =
-            ok_to_detach_disk_states.iter().map(|s| s.label()).collect();
-
-        // TODO(https://github.com/oxidecomputer/omicron/issues/811):
-        // This list of instance detach states is more restrictive than it
-        // plausibly could be.
-        //
-        // We currently only permit detaching disks from stopped instances.
-        let ok_to_detach_instance_states = vec![
-            db::model::InstanceState(api::external::InstanceState::Creating),
-            db::model::InstanceState(api::external::InstanceState::Stopped),
-        ];
-
-        let detached_label = api::external::DiskState::Detached.label();
-
-        let disk = Instance::detach_resource(
-            authz_instance.id(),
-            authz_disk.id(),
-            instance::table
-                .into_boxed()
-                .filter(instance::dsl::state.eq_any(ok_to_detach_instance_states)),
-            disk::table
-                .into_boxed()
-                .filter(disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels)),
-            diesel::update(disk::dsl::disk)
-                .set((
-                    disk::dsl::disk_state.eq(detached_label),
-                    disk::dsl::attach_instance_id.eq(Option::<Uuid>::None)
-                ))
-        )
-        .detach_and_get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .or_else(|e| {
-            match e {
-                DetachError::CollectionNotFound => {
-                    Err(Error::not_found_by_id(
-                        ResourceType::Instance,
-                        &authz_instance.id(),
-                    ))
-                },
-                DetachError::ResourceNotFound => {
-                    Err(Error::not_found_by_id(
-                        ResourceType::Disk,
-                        &authz_disk.id(),
-                    ))
-                },
-                DetachError::NoUpdate { resource, collection } => {
-                    let disk_state = resource.state().into();
-                    match disk_state {
-                        // Idempotent errors: We did not perform an update,
-                        // because we're already in the process of detaching.
-                        api::external::DiskState::Detached => {
-                            return Ok(resource);
-                        }
-                        api::external::DiskState::Detaching(id) if id == authz_instance.id() => {
-                            return Ok(resource);
-                        }
-                        // Ok-to-detach disk states: Inspect the state to infer
-                        // why we did not detach.
-                        api::external::DiskState::Attached(id) if id == authz_instance.id() => {
-                            match collection.runtime_state.state.state() {
-                                // Ok-to-be-detached instance states:
-                                api::external::InstanceState::Creating |
-                                api::external::InstanceState::Stopped => {
-                                    // We can't detach, but the error hasn't
-                                    // helped us infer why.
-                                    return Err(Error::internal_error(
-                                        "cannot detach disk"
-                                    ));
-                                }
-                                // Not okay-to-be-detached instance states:
-                                _ => {
-                                    Err(Error::invalid_request(&format!(
-                                        "cannot detach disk from instance in {} state",
-                                        collection.runtime_state.state.state(),
-                                    )))
-                                }
-                            }
-                        },
-                        api::external::DiskState::Attaching(id) if id == authz_instance.id() => {
-                            Err(Error::invalid_request(&format!(
-                                "cannot detach disk \"{}\": disk is currently being attached",
-                                resource.name().as_str(),
-                            )))
-                        },
-                        // Not-okay-to-detach disk states: The disk is attached elsewhere.
-                        api::external::DiskState::Attached(_) |
-                        api::external::DiskState::Attaching(_) |
-                        api::external::DiskState::Detaching(_) => {
-                            Err(Error::invalid_request(&format!(
-                                "cannot detach disk \"{}\": disk is attached to another instance",
-                                resource.name().as_str(),
-                            )))
-                        }
-                        _ => {
-                            Err(Error::invalid_request(&format!(
-                                "cannot detach disk \"{}\": invalid state {}",
-                                resource.name().as_str(),
-                                disk_state,
-                            )))
-                        }
-                    }
-                },
-                DetachError::DatabaseError(e) => {
-                    Err(public_error_from_diesel_pool(e, ErrorHandler::Server))
-                },
-            }
-        })?;
-
-        Ok(disk)
-    }
-
-    pub async fn disk_update_runtime(
-        &self,
-        opctx: &OpContext,
-        authz_disk: &authz::Disk,
-        new_runtime: &DiskRuntimeState,
-    ) -> Result<bool, Error> {
-        // TODO-security This permission might be overloaded here. The way disk
-        // runtime updates work is that the caller in Nexus first updates the
-        // Sled Agent to make a change, then updates the database to reflect
-        // that change. So by the time we get here, we better have already done
-        // an authz check, or we will have already made some unauthorized change
-        // to the system!
At the same time, we don't want just anybody to be - // able to modify the database state. So we _do_ still want an authz - // check here. Arguably it's for a different kind of action, but it - // doesn't seem that useful to split it out right now. - opctx.authorize(authz::Action::Modify, authz_disk).await?; - - let disk_id = authz_disk.id(); - use db::schema::disk::dsl; - let updated = diesel::update(dsl::disk) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(disk_id)) - .filter(dsl::state_generation.lt(new_runtime.gen)) - .set(new_runtime.clone()) - .check_if_exists::<Disk>(disk_id) - .execute_and_check(self.pool()) - .await - .map(|r| match r.status { - UpdateStatus::Updated => true, - UpdateStatus::NotUpdatedButExists => false, - }) - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_disk), - ) - })?; - - Ok(updated) - } - - /// Fetches information about a Disk that the caller has previously fetched - /// - /// The only difference between this function and a new fetch by id is that - /// this function preserves the `authz_disk` that you started with -- which - /// keeps track of how you looked it up. So if you looked it up by name, - /// the authz you get back will reflect that, whereas if you did a fresh - /// lookup by id, it wouldn't. - /// TODO-cleanup this could be provided by the Lookup API for any resource - pub async fn disk_refetch( - &self, - opctx: &OpContext, - authz_disk: &authz::Disk, - ) -> LookupResult<Disk> { - let (.., db_disk) = LookupPath::new(opctx, self) - .disk_id(authz_disk.id()) - .fetch() - .await - .map_err(|e| match e { - // Use the "not found" message of the authz object we were - // given, which will reflect however the caller originally - // looked it up. - Error::ObjectNotFound { .. } => authz_disk.not_found(), - e => e, - })?; - Ok(db_disk) - } - - /// Updates a disk record to indicate it has been deleted. - /// - /// Returns the volume ID associated with the deleted disk. - /// - /// Does not attempt to modify any resources (e.g. regions) which may - /// belong to the disk. - // TODO: Delete me (this function, not the disk!), ensure all datastore - // access is auth-checked. - // - // Here's the deal: We have auth checks on access to the database - at the - // time of writing this comment, only a subset of access is protected, and - // "Delete Disk" is actually one of the first targets of this auth check. - // - // However, there are contexts where we want to delete disks *outside* of - // calling the HTTP API-layer "delete disk" endpoint. As one example, during - // the "undo" part of the disk creation saga, we want to allow users to - // delete the disk they (partially) created. - // - // This gets a little tricky mapping back to user permissions - a user - // SHOULD be able to create a disk with the "create" permission, without the - // "delete" permission. To still make the call internally, we'd basically - // need to manufacture a token that identifies the ability to "create a - // disk, or delete a very specific disk with ID = ...".
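// The conditional soft delete implemented below reduces to roughly this
// SQL shape (illustrative only -- the real statement is generated by
// diesel plus the `check_if_exists` CTE helper, which also returns the
// found row so callers can explain why a no-op happened):
const _SKETCH_DELETE_DISK_SQL: &str = "
    UPDATE disk
       SET disk_state = 'destroyed', time_deleted = now()
     WHERE time_deleted IS NULL
       AND id = $1
       AND disk_state IN ('detached', 'faulted', 'creating')
       AND attach_instance_id IS NULL";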
- pub async fn project_delete_disk_no_auth( - &self, - disk_id: &Uuid, - ) -> Result { - use db::schema::disk::dsl; - let pool = self.pool(); - let now = Utc::now(); - - let ok_to_delete_states = vec![ - api::external::DiskState::Detached, - api::external::DiskState::Faulted, - api::external::DiskState::Creating, - ]; - - let ok_to_delete_state_labels: Vec<_> = - ok_to_delete_states.iter().map(|s| s.label()).collect(); - let destroyed = api::external::DiskState::Destroyed.label(); - - let result = diesel::update(dsl::disk) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(*disk_id)) - .filter(dsl::disk_state.eq_any(ok_to_delete_state_labels)) - .filter(dsl::attach_instance_id.is_null()) - .set((dsl::disk_state.eq(destroyed), dsl::time_deleted.eq(now))) - .check_if_exists::(*disk_id) - .execute_and_check(pool) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::Disk, - LookupType::ById(*disk_id), - ), - ) - })?; - - match result.status { - UpdateStatus::Updated => Ok(result.found.volume_id), - UpdateStatus::NotUpdatedButExists => { - let disk = result.found; - let disk_state = disk.state(); - if disk.time_deleted().is_some() - && disk_state.state() - == &api::external::DiskState::Destroyed - { - // To maintain idempotency, if the disk has already been - // destroyed, don't throw an error. - return Ok(disk.volume_id); - } else if !ok_to_delete_states.contains(disk_state.state()) { - return Err(Error::InvalidRequest { - message: format!( - "disk cannot be deleted in state \"{}\"", - disk.runtime_state.disk_state - ), - }); - } else if disk_state.is_attached() { - return Err(Error::InvalidRequest { - message: String::from("disk is attached"), - }); - } else { - // NOTE: This is a "catch-all" error case, more specific - // errors should be preferred as they're more actionable. - return Err(Error::InternalError { - internal_message: String::from( - "disk exists, but cannot be deleted", - ), - }); - } - } - } - } - - // Network interfaces - - /// Create a network interface attached to the provided instance. - pub async fn instance_create_network_interface( - &self, - opctx: &OpContext, - authz_subnet: &authz::VpcSubnet, - authz_instance: &authz::Instance, - interface: IncompleteNetworkInterface, - ) -> Result { - opctx - .authorize(authz::Action::CreateChild, authz_instance) - .await - .map_err(network_interface::InsertError::External)?; - opctx - .authorize(authz::Action::CreateChild, authz_subnet) - .await - .map_err(network_interface::InsertError::External)?; - self.instance_create_network_interface_raw(&opctx, interface).await - } - - pub(super) async fn instance_create_network_interface_raw( - &self, - opctx: &OpContext, - interface: IncompleteNetworkInterface, - ) -> Result { - use db::schema::network_interface::dsl; - let query = network_interface::InsertQuery::new(interface.clone()); - diesel::insert_into(dsl::network_interface) - .values(query) - .returning(NetworkInterface::as_returning()) - .get_result_async( - self.pool_authorized(opctx) - .await - .map_err(network_interface::InsertError::External)?, - ) - .await - .map_err(|e| { - network_interface::InsertError::from_pool(e, &interface) - }) - } - - /// Delete all network interfaces attached to the given instance. - // NOTE: This is mostly useful in the context of sagas, but might be helpful - // in other situations, such as moving an instance between VPC Subnets. 
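// Like every other "delete" in this datastore, the bulk NIC deletion
// below is a soft delete: stamp `time_deleted` and let every reader
// filter on `time_deleted IS NULL`. A std-only, in-memory analogue of
// that convention (illustrative; not the real model types):
use std::time::SystemTime;

struct SoftDeletable<T> {
    value: T,
    time_deleted: Option<SystemTime>,
}

impl<T> SoftDeletable<T> {
    /// Idempotent: only the first delete stamps the time.
    fn soft_delete(&mut self) {
        self.time_deleted.get_or_insert_with(SystemTime::now);
    }

    /// Readers see the value only while the row is live.
    fn live(&self) -> Option<&T> {
        self.time_deleted.is_none().then_some(&self.value)
    }
}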
- pub async fn instance_delete_all_network_interfaces( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - ) -> DeleteResult { - opctx.authorize(authz::Action::Modify, authz_instance).await?; - - use db::schema::network_interface::dsl; - let now = Utc::now(); - diesel::update(dsl::network_interface) - .filter(dsl::instance_id.eq(authz_instance.id())) - .filter(dsl::time_deleted.is_null()) - .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_instance), - ) - })?; - Ok(()) - } - - /// Delete a `NetworkInterface` attached to a provided instance. - /// - /// Note that the primary interface for an instance cannot be deleted if - /// there are any secondary interfaces. - pub async fn instance_delete_network_interface( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - authz_interface: &authz::NetworkInterface, - ) -> Result<(), network_interface::DeleteError> { - opctx - .authorize(authz::Action::Delete, authz_interface) - .await - .map_err(network_interface::DeleteError::External)?; - let query = network_interface::DeleteQuery::new( - authz_instance.id(), - authz_interface.id(), - ); - query - .clone() - .execute_async( - self.pool_authorized(opctx) - .await - .map_err(network_interface::DeleteError::External)?, - ) - .await - .map_err(|e| { - network_interface::DeleteError::from_pool(e, &query) - })?; - Ok(()) - } - - /// Return the information about an instance's network interfaces required - /// for the sled agent to instantiate them via OPTE. - /// - /// OPTE requires information that's currently split across the network - /// interface and VPC subnet tables. This query just joins those for each - /// NIC in the given instance. 
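// In rough SQL terms, the query below is (illustrative; the real query is
// built with diesel, and the column list is abridged to the selected
// fields):
const _SKETCH_NIC_INFO_SQL: &str = "
    SELECT nic.name, nic.ip, nic.mac, subnet.ipv4_block, subnet.ipv6_block,
           vpc.vni, nic.is_primary, nic.slot
      FROM network_interface AS nic
      JOIN vpc_subnet AS subnet ON nic.subnet_id = subnet.id
      JOIN vpc ON subnet.vpc_id = vpc.id
     WHERE nic.instance_id = $1 AND nic.time_deleted IS NULL
     ORDER BY nic.slot";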
- pub(crate) async fn derive_guest_network_interface_info( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - ) -> ListResultVec<sled_client_types::NetworkInterface> { - opctx.authorize(authz::Action::ListChildren, authz_instance).await?; - - use db::schema::network_interface; - use db::schema::vpc; - use db::schema::vpc_subnet; - - // The record type for the results of the below JOIN query - #[derive(Debug, diesel::Queryable)] - struct NicInfo { - name: db::model::Name, - ip: ipnetwork::IpNetwork, - mac: db::model::MacAddr, - ipv4_block: db::model::Ipv4Net, - ipv6_block: db::model::Ipv6Net, - vni: db::model::Vni, - primary: bool, - slot: i16, - } - - impl From<NicInfo> for sled_client_types::NetworkInterface { - fn from(nic: NicInfo) -> sled_client_types::NetworkInterface { - let ip_subnet = if nic.ip.is_ipv4() { - external::IpNet::V4(nic.ipv4_block.0) - } else { - external::IpNet::V6(nic.ipv6_block.0) - }; - sled_client_types::NetworkInterface { - name: sled_client_types::Name::from(&nic.name.0), - ip: nic.ip.ip(), - mac: sled_client_types::MacAddr::from(nic.mac.0), - subnet: sled_client_types::IpNet::from(ip_subnet), - vni: sled_client_types::Vni::from(nic.vni.0), - primary: nic.primary, - slot: u8::try_from(nic.slot).unwrap(), - } - } - } - - let rows = network_interface::table - .filter(network_interface::instance_id.eq(authz_instance.id())) - .filter(network_interface::time_deleted.is_null()) - .inner_join( - vpc_subnet::table - .on(network_interface::subnet_id.eq(vpc_subnet::id)), - ) - .inner_join(vpc::table.on(vpc_subnet::vpc_id.eq(vpc::id))) - .order_by(network_interface::slot) - // TODO-cleanup: Having to specify each column again is less than - // ideal, but we can't derive `Selectable` since this is the result - // of a JOIN and not from a single table. DRY this out if possible. - .select(( - network_interface::name, - network_interface::ip, - network_interface::mac, - vpc_subnet::ipv4_block, - vpc_subnet::ipv6_block, - vpc::vni, - network_interface::is_primary, - network_interface::slot, - )) - .get_results_async::<NicInfo>(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - Ok(rows - .into_iter() - .map(sled_client_types::NetworkInterface::from) - .collect()) - } - - /// List network interfaces associated with a given instance. - pub async fn instance_list_network_interfaces( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec<NetworkInterface> { - opctx.authorize(authz::Action::ListChildren, authz_instance).await?; - - use db::schema::network_interface::dsl; - paginated(dsl::network_interface, dsl::name, &pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::instance_id.eq(authz_instance.id())) - .select(NetworkInterface::as_select()) - .load_async::<NetworkInterface>(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - /// Update a network interface associated with a given instance. - pub async fn instance_update_network_interface( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - authz_interface: &authz::NetworkInterface, - updates: NetworkInterfaceUpdate, - ) -> UpdateResult<NetworkInterface> { - use crate::db::schema::network_interface::dsl; - - // This database operation is surprisingly subtle. It's possible to - // express this in a single query, with multiple common-table - // expressions for the updated rows.
For example, if we're setting a new - // primary interface, we need to set the `is_primary` column to false - // for the current primary, and then set it to true, along with any - // other updates, for the new primary. - // - // That's feasible, but there's a CRDB bug that affects some queries - // with multiple update statements. It's possible that this query isn't - // in that bucket, but we'll still avoid it for now. Instead, we'll bite - // the bullet and use a transaction. - // - // See https://github.com/oxidecomputer/omicron/issues/1204 for the - // issue tracking the work to move this into a CTE. - - // Build up some of the queries first, outside the transaction. - // - // This selects the existing primary interface. - let instance_id = authz_instance.id(); - let interface_id = authz_interface.id(); - let find_primary_query = dsl::network_interface - .filter(dsl::instance_id.eq(instance_id)) - .filter(dsl::is_primary.eq(true)) - .filter(dsl::time_deleted.is_null()) - .select(NetworkInterface::as_select()); - - // This returns the state of the associated instance. - let instance_query = db::schema::instance::dsl::instance - .filter(db::schema::instance::dsl::id.eq(instance_id)) - .filter(db::schema::instance::dsl::time_deleted.is_null()) - .select(Instance::as_select()); - let stopped = - db::model::InstanceState::new(external::InstanceState::Stopped); - - // This is the actual query to update the target interface. - let make_primary = matches!(updates.make_primary, Some(true)); - let update_target_query = diesel::update(dsl::network_interface) - .filter(dsl::id.eq(interface_id)) - .filter(dsl::time_deleted.is_null()) - .set(updates) - .returning(NetworkInterface::as_returning()); - - // Errors returned from the below transactions. - #[derive(Debug)] - enum NetworkInterfaceUpdateError { - InstanceNotStopped, - FailedToUnsetPrimary(diesel::result::Error), - } - type TxnError = TransactionError; - - let pool = self.pool_authorized(opctx).await?; - if make_primary { - pool.transaction(move |conn| { - let instance_state = - instance_query.get_result(conn)?.runtime_state.state; - if instance_state != stopped { - return Err(TxnError::CustomError( - NetworkInterfaceUpdateError::InstanceNotStopped, - )); - } - - // First, get the primary interface - let primary_interface = find_primary_query.get_result(conn)?; - - // If the target and primary are different, we need to toggle - // the primary into a secondary. - if primary_interface.identity.id != interface_id { - if let Err(e) = diesel::update(dsl::network_interface) - .filter(dsl::id.eq(primary_interface.identity.id)) - .filter(dsl::time_deleted.is_null()) - .set(dsl::is_primary.eq(false)) - .execute(conn) - { - return Err(TxnError::CustomError( - NetworkInterfaceUpdateError::FailedToUnsetPrimary( - e, - ), - )); - } - } - - // In any case, update the actual target - Ok(update_target_query.get_result(conn)?) - }) - } else { - // In this case, we can just directly apply the updates. By - // construction, `updates.make_primary` is `None`, so nothing will - // be done there. The other columns always need to be updated, and - // we're only hitting a single row. Note that we still need to - // verify the instance is stopped. - pool.transaction(move |conn| { - let instance_state = - instance_query.get_result(conn)?.runtime_state.state; - if instance_state != stopped { - return Err(TxnError::CustomError( - NetworkInterfaceUpdateError::InstanceNotStopped, - )); - } - Ok(update_target_query.get_result(conn)?) 
- }) - } - .await - .map_err(|e| match e { - TxnError::CustomError( - NetworkInterfaceUpdateError::InstanceNotStopped, - ) => Error::invalid_request( - "Instance must be stopped to update its network interfaces", - ), - _ => Error::internal_error(&format!("Transaction error: {:?}", e)), - }) - } - - // Create a record for a new Oximeter instance - pub async fn oximeter_create( - &self, - info: &OximeterInfo, - ) -> Result<(), Error> { - use db::schema::oximeter::dsl; - - // If we get a conflict on the Oximeter ID, this means that collector instance was - // previously registered, and it's re-registering due to something like a service restart. - // In this case, we update the time modified and the service address, rather than - // propagating a constraint violation to the caller. - diesel::insert_into(dsl::oximeter) - .values(*info) - .on_conflict(dsl::id) - .do_update() - .set(( - dsl::time_modified.eq(Utc::now()), - dsl::ip.eq(info.ip), - dsl::port.eq(info.port), - )) - .execute_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Oximeter, - "Oximeter Info", - ), - ) - })?; - Ok(()) - } - - // List the oximeter collector instances - pub async fn oximeter_list( - &self, - page_params: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - use db::schema::oximeter::dsl; - paginated(dsl::oximeter, dsl::id, page_params) - .load_async::(self.pool()) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - // Create a record for a new producer endpoint - pub async fn producer_endpoint_create( - &self, - producer: &ProducerEndpoint, - ) -> Result<(), Error> { - use db::schema::metric_producer::dsl; - - // TODO: see https://github.com/oxidecomputer/omicron/issues/323 - diesel::insert_into(dsl::metric_producer) - .values(producer.clone()) - .on_conflict(dsl::id) - .do_update() - .set(( - dsl::time_modified.eq(Utc::now()), - dsl::ip.eq(producer.ip), - dsl::port.eq(producer.port), - dsl::interval.eq(producer.interval), - dsl::base_route.eq(producer.base_route.clone()), - )) - .execute_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::MetricProducer, - "Producer Endpoint", - ), - ) - })?; - Ok(()) - } - - // List the producer endpoint records by the oximeter instance to which they're assigned. 
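// Both registration paths here are deliberate upserts: re-registering an
// existing collector or producer (say, after a service restart) refreshes
// its row instead of surfacing a primary-key conflict. Roughly, as
// illustrative SQL:
const _SKETCH_UPSERT_SQL: &str = "
    INSERT INTO metric_producer (id, ip, port, interval, base_route)
    VALUES ($1, $2, $3, $4, $5)
    ON CONFLICT (id) DO UPDATE
       SET time_modified = now(),
           ip = excluded.ip, port = excluded.port,
           interval = excluded.interval, base_route = excluded.base_route";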
- pub async fn producers_list_by_oximeter_id( - &self, - oximeter_id: Uuid, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - use db::schema::metric_producer::dsl; - paginated(dsl::metric_producer, dsl::id, &pagparams) - .filter(dsl::oximeter_id.eq(oximeter_id)) - .order_by((dsl::oximeter_id, dsl::id)) - .select(ProducerEndpoint::as_select()) - .load_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::MetricProducer, - "By Oximeter ID", - ), - ) - }) - } - - // Sagas - - pub async fn saga_create( - &self, - saga: &db::saga_types::Saga, - ) -> Result<(), Error> { - use db::schema::saga::dsl; - - let name = saga.template_name.clone(); - diesel::insert_into(dsl::saga) - .values(saga.clone()) - .execute_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::SagaDbg, &name), - ) - })?; - Ok(()) - } - - pub async fn saga_create_event( - &self, - event: &db::saga_types::SagaNodeEvent, - ) -> Result<(), Error> { - use db::schema::saga_node_event::dsl; - - // TODO-robustness This INSERT ought to be conditional on this SEC still - // owning this saga. - diesel::insert_into(dsl::saga_node_event) - .values(event.clone()) - .execute_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::SagaDbg, "Saga Event"), - ) - })?; - Ok(()) - } - - pub async fn saga_update_state( - &self, - saga_id: steno::SagaId, - new_state: steno::SagaCachedState, - current_sec: db::saga_types::SecId, - current_adopt_generation: Generation, - ) -> Result<(), Error> { - use db::schema::saga::dsl; - - let saga_id: db::saga_types::SagaId = saga_id.into(); - let result = diesel::update(dsl::saga) - .filter(dsl::id.eq(saga_id)) - .filter(dsl::current_sec.eq(current_sec)) - .filter(dsl::adopt_generation.eq(current_adopt_generation)) - .set(dsl::saga_state.eq(db::saga_types::SagaCachedState(new_state))) - .check_if_exists::(saga_id) - .execute_and_check(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SagaDbg, - LookupType::ById(saga_id.0.into()), - ), - ) - })?; - - match result.status { - UpdateStatus::Updated => Ok(()), - UpdateStatus::NotUpdatedButExists => Err(Error::InvalidRequest { - message: format!( - "failed to update saga {:?} with state {:?}: preconditions not met: \ - expected current_sec = {:?}, adopt_generation = {:?}, \ - but found current_sec = {:?}, adopt_generation = {:?}, state = {:?}", - saga_id, - new_state, - current_sec, - current_adopt_generation, - result.found.current_sec, - result.found.adopt_generation, - result.found.saga_state, - ) - }), - } - } - - pub async fn saga_list_unfinished_by_id( - &self, - sec_id: &db::SecId, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - use db::schema::saga::dsl; - paginated(dsl::saga, dsl::id, &pagparams) - .filter(dsl::saga_state.ne(db::saga_types::SagaCachedState( - steno::SagaCachedState::Done, - ))) - .filter(dsl::current_sec.eq(*sec_id)) - .load_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SagaDbg, - LookupType::ById(sec_id.0), - ), - ) - }) - } - - pub async fn saga_node_event_list_by_id( - &self, - id: db::saga_types::SagaId, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - use db::schema::saga_node_event::dsl; - paginated(dsl::saga_node_event, 
dsl::saga_id, &pagparams) - .filter(dsl::saga_id.eq(id)) - .load_async::(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SagaDbg, - LookupType::ById(id.0 .0), - ), - ) - })? - .into_iter() - .map(|db_event| steno::SagaNodeEvent::try_from(db_event)) - .collect::>() - } - - // VPCs - - pub async fn project_list_vpcs( - &self, - opctx: &OpContext, - authz_project: &authz::Project, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, authz_project).await?; - - use db::schema::vpc::dsl; - paginated(dsl::vpc, dsl::name, &pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::project_id.eq(authz_project.id())) - .select(Vpc::as_select()) - .load_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn project_create_vpc( - &self, - opctx: &OpContext, - authz_project: &authz::Project, - vpc: IncompleteVpc, - ) -> Result<(authz::Vpc, Vpc), Error> { - use db::schema::vpc::dsl; - - assert_eq!(authz_project.id(), vpc.project_id); - opctx.authorize(authz::Action::CreateChild, authz_project).await?; - - // TODO-correctness Shouldn't this use "insert_resource"? - // - // Note that to do so requires adding an `rcgen` column to the project - // table. - let name = vpc.identity.name.clone(); - let query = InsertVpcQuery::new(vpc); - let vpc = diesel::insert_into(dsl::vpc) - .values(query) - .returning(Vpc::as_returning()) - .get_result_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Vpc, name.as_str()), - ) - })?; - Ok(( - authz::Vpc::new( - authz_project.clone(), - vpc.id(), - LookupType::ByName(vpc.name().to_string()), - ), - vpc, - )) - } - - pub async fn project_update_vpc( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - updates: VpcUpdate, - ) -> UpdateResult { - opctx.authorize(authz::Action::Modify, authz_vpc).await?; - - use db::schema::vpc::dsl; - diesel::update(dsl::vpc) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_vpc.id())) - .set(updates) - .returning(Vpc::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_vpc), - ) - }) - } - - pub async fn project_delete_vpc( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - ) -> DeleteResult { - opctx.authorize(authz::Action::Delete, authz_vpc).await?; - - use db::schema::vpc::dsl; - - // Note that we don't ensure the firewall rules are empty here, because - // we allow deleting VPCs with firewall rules present. Inserting new - // rules is serialized with respect to the deletion by the row lock - // associated with the VPC row, since we use the collection insert CTE - // pattern to add firewall rules. - - let now = Utc::now(); - diesel::update(dsl::vpc) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_vpc.id())) - .set(dsl::time_deleted.eq(now)) - .returning(Vpc::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_vpc), - ) - })?; - Ok(()) - } - - pub async fn vpc_list_firewall_rules( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - ) -> ListResultVec<VpcFirewallRule> { - // Firewall rules are modeled in the API as a single resource under the - // Vpc (rather than individual child resources with their own CRUD - // endpoints). You cannot look them up individually, create them, - // remove them, or update them. You can only modify the whole set. So - // for authz, we treat them as part of the Vpc itself. - opctx.authorize(authz::Action::Read, authz_vpc).await?; - use db::schema::vpc_firewall_rule::dsl; - - dsl::vpc_firewall_rule - .filter(dsl::time_deleted.is_null()) - .filter(dsl::vpc_id.eq(authz_vpc.id())) - .order(dsl::name.asc()) - .select(VpcFirewallRule::as_select()) - .load_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn vpc_delete_all_firewall_rules( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - ) -> DeleteResult { - opctx.authorize(authz::Action::Modify, authz_vpc).await?; - use db::schema::vpc_firewall_rule::dsl; - - let now = Utc::now(); - // TODO-performance: Paginate this update to avoid long queries - diesel::update(dsl::vpc_firewall_rule) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::vpc_id.eq(authz_vpc.id())) - .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_vpc), - ) - })?; - Ok(()) - } - - /// Replace all firewall rules with the given rules - pub async fn vpc_update_firewall_rules( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - mut rules: Vec<VpcFirewallRule>, - ) -> UpdateResult<Vec<VpcFirewallRule>> { - opctx.authorize(authz::Action::Modify, authz_vpc).await?; - for r in &rules { - assert_eq!(r.vpc_id, authz_vpc.id()); - } - - // Sort the rules in the same order that we would return them when - // listing them. This is because we're going to use RETURNING to return - // the inserted rows from the database and we want them to come back in - // the same order that we would normally list them. - rules.sort_by_key(|r| r.name().to_string()); - - use db::schema::vpc_firewall_rule::dsl; - - let now = Utc::now(); - let delete_old_query = diesel::update(dsl::vpc_firewall_rule) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::vpc_id.eq(authz_vpc.id())) - .set(dsl::time_deleted.eq(now)); - - let insert_new_query = Vpc::insert_resource( - authz_vpc.id(), - diesel::insert_into(dsl::vpc_firewall_rule).values(rules), - ); - - #[derive(Debug)] - enum FirewallUpdateError { - CollectionNotFound, - } - type TxnError = TransactionError<FirewallUpdateError>; - - // TODO-scalability: Ideally this would be a CTE so we don't need to - // hold a transaction open across multiple roundtrips from the database, - // but for now we're using a transaction due to the severely decreased - // legibility of CTEs via diesel right now. - self.pool_authorized(opctx) - .await? - .transaction(move |conn| { - delete_old_query.execute(conn)?; - - // The generation count update on the vpc table row will take a - // write lock on the row, ensuring that the vpc was not deleted - // concurrently.
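// As illustrative SQL, the transaction body amounts to the following.
// The generation bump and the insert are really a single
// collection-insert statement generated by `insert_resource`, and the
// name of the generation column is an assumption here:
const _SKETCH_REPLACE_RULES_SQL: &str = "
    UPDATE vpc_firewall_rule SET time_deleted = now()
     WHERE vpc_id = $1 AND time_deleted IS NULL;
    -- bumping the collection generation takes the row lock:
    UPDATE vpc SET rcgen = rcgen + 1
     WHERE id = $1 AND time_deleted IS NULL;
    INSERT INTO vpc_firewall_rule VALUES (...) RETURNING *";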
- insert_new_query.insert_and_get_results(conn).map_err(|e| { - match e { - SyncInsertError::CollectionNotFound => { - TxnError::CustomError( - FirewallUpdateError::CollectionNotFound, - ) - } - SyncInsertError::DatabaseError(e) => e.into(), - } - }) - }) - .await - .map_err(|e| match e { - TxnError::CustomError( - FirewallUpdateError::CollectionNotFound, - ) => Error::not_found_by_id(ResourceType::Vpc, &authz_vpc.id()), - TxnError::Pool(e) => public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_vpc), - ), - }) - } - - pub async fn vpc_list_subnets( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, authz_vpc).await?; - - use db::schema::vpc_subnet::dsl; - paginated(dsl::vpc_subnet, dsl::name, &pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::vpc_id.eq(authz_vpc.id())) - .select(VpcSubnet::as_select()) - .load_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - /// Insert a VPC Subnet, checking for unique IP address ranges. - pub async fn vpc_create_subnet( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - subnet: VpcSubnet, - ) -> Result { - opctx - .authorize(authz::Action::CreateChild, authz_vpc) - .await - .map_err(SubnetError::External)?; - assert_eq!(authz_vpc.id(), subnet.vpc_id); - - self.vpc_create_subnet_raw(subnet).await - } - - pub(super) async fn vpc_create_subnet_raw( - &self, - subnet: VpcSubnet, - ) -> Result { - use db::schema::vpc_subnet::dsl; - let values = FilterConflictingVpcSubnetRangesQuery::new(subnet.clone()); - diesel::insert_into(dsl::vpc_subnet) - .values(values) - .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool()) - .await - .map_err(|e| SubnetError::from_pool(e, &subnet)) - } - - pub async fn vpc_delete_subnet( - &self, - opctx: &OpContext, - authz_subnet: &authz::VpcSubnet, - ) -> DeleteResult { - opctx.authorize(authz::Action::Delete, authz_subnet).await?; - - use db::schema::vpc_subnet::dsl; - let now = Utc::now(); - diesel::update(dsl::vpc_subnet) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_subnet.id())) - .set(dsl::time_deleted.eq(now)) - .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_subnet), - ) - })?; - Ok(()) - } - - pub async fn vpc_update_subnet( - &self, - opctx: &OpContext, - authz_subnet: &authz::VpcSubnet, - updates: VpcSubnetUpdate, - ) -> UpdateResult { - opctx.authorize(authz::Action::Modify, authz_subnet).await?; - - use db::schema::vpc_subnet::dsl; - diesel::update(dsl::vpc_subnet) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_subnet.id())) - .set(updates) - .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_subnet), - ) - }) - } - - pub async fn subnet_list_network_interfaces( - &self, - opctx: &OpContext, - authz_subnet: &authz::VpcSubnet, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, authz_subnet).await?; - - use db::schema::network_interface::dsl; - paginated(dsl::network_interface, dsl::name, pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::subnet_id.eq(authz_subnet.id())) - .select(NetworkInterface::as_select()) - .load_async::( - self.pool_authorized(opctx).await?, - ) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn vpc_list_routers( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, authz_vpc).await?; - - use db::schema::vpc_router::dsl; - paginated(dsl::vpc_router, dsl::name, pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::vpc_id.eq(authz_vpc.id())) - .select(VpcRouter::as_select()) - .load_async::( - self.pool_authorized(opctx).await?, - ) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn vpc_create_router( - &self, - opctx: &OpContext, - authz_vpc: &authz::Vpc, - router: VpcRouter, - ) -> CreateResult<(authz::VpcRouter, VpcRouter)> { - opctx.authorize(authz::Action::CreateChild, authz_vpc).await?; - - use db::schema::vpc_router::dsl; - let name = router.name().clone(); - let router = diesel::insert_into(dsl::vpc_router) - .values(router) - .on_conflict(dsl::id) - .do_nothing() - .returning(VpcRouter::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::VpcRouter, - name.as_str(), - ), - ) - })?; - Ok(( - authz::VpcRouter::new( - authz_vpc.clone(), - router.id(), - LookupType::ById(router.id()), - ), - router, - )) - } - - pub async fn vpc_delete_router( - &self, - opctx: &OpContext, - authz_router: &authz::VpcRouter, - ) -> DeleteResult { - opctx.authorize(authz::Action::Delete, authz_router).await?; - - use db::schema::vpc_router::dsl; - let now = Utc::now(); - diesel::update(dsl::vpc_router) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_router.id())) - .set(dsl::time_deleted.eq(now)) - .returning(VpcRouter::as_returning()) - .get_result_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_router), - ) - })?; - Ok(()) - } - - pub async fn vpc_update_router( - &self, - opctx: &OpContext, - authz_router: &authz::VpcRouter, - updates: VpcRouterUpdate, - ) -> UpdateResult { - opctx.authorize(authz::Action::Modify, authz_router).await?; - - use db::schema::vpc_router::dsl; - diesel::update(dsl::vpc_router) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_router.id())) - .set(updates) - .returning(VpcRouter::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_router), - ) - }) - } - - pub async fn router_list_routes( - &self, - opctx: &OpContext, - authz_router: &authz::VpcRouter, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, authz_router).await?; - - use db::schema::router_route::dsl; - paginated(dsl::router_route, dsl::name, pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::vpc_router_id.eq(authz_router.id())) - .select(RouterRoute::as_select()) - .load_async::( - self.pool_authorized(opctx).await?, - ) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn router_create_route( - &self, - opctx: &OpContext, - authz_router: &authz::VpcRouter, - route: RouterRoute, - ) -> CreateResult { - assert_eq!(authz_router.id(), route.vpc_router_id); - opctx.authorize(authz::Action::CreateChild, authz_router).await?; - - use db::schema::router_route::dsl; - let router_id = route.vpc_router_id; - let name = route.name().clone(); - - VpcRouter::insert_resource( - router_id, - diesel::insert_into(dsl::router_route).values(route), - ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { - type_name: ResourceType::VpcRouter, - lookup_type: LookupType::ById(router_id), - }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::RouterRoute, - name.as_str(), - ), - ) - } - }) - } - - pub async fn router_delete_route( - &self, - opctx: &OpContext, - authz_route: &authz::RouterRoute, - ) -> DeleteResult { - opctx.authorize(authz::Action::Delete, authz_route).await?; - - use db::schema::router_route::dsl; - let now = Utc::now(); - diesel::update(dsl::router_route) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_route.id())) - .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_route), - ) - })?; - Ok(()) - } - - pub async fn router_update_route( - &self, - opctx: &OpContext, - authz_route: &authz::RouterRoute, - route_update: RouterRouteUpdate, - ) -> UpdateResult { - opctx.authorize(authz::Action::Modify, authz_route).await?; - - use db::schema::router_route::dsl; - diesel::update(dsl::router_route) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_route.id())) - .set(route_update) - .returning(RouterRoute::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_route), - ) - }) - } - - // TODO-correctness: fix session method errors. the map_errs turn all errors - // into 500s, most notably (and most frequently) session not found. they - // don't end up as 500 in the http response because they get turned into a - // 4xx error by calling code, the session cookie authn scheme. this is - // necessary for now in order to avoid the possibility of leaking out a - // too-friendly 404 to the client. 
once datastore has its own error type and - // the conversion to serializable user-facing errors happens elsewhere (see - // issue #347) these methods can safely return more accurate errors, and - // showing/hiding that info as appropriate will be handled higher up - // TODO-correctness this may apply at the Nexus level as well. - - pub async fn session_create( - &self, - opctx: &OpContext, - session: ConsoleSession, - ) -> CreateResult<ConsoleSession> { - opctx - .authorize(authz::Action::CreateChild, &authz::CONSOLE_SESSION_LIST) - .await?; - - use db::schema::console_session::dsl; - - diesel::insert_into(dsl::console_session) - .values(session) - .returning(ConsoleSession::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - Error::internal_error(&format!( - "error creating session: {:?}", - e - )) - }) - } - - pub async fn session_update_last_used( - &self, - opctx: &OpContext, - authz_session: &authz::ConsoleSession, - ) -> UpdateResult<authn::ConsoleSessionWithSiloId> { - opctx.authorize(authz::Action::Modify, authz_session).await?; - - use db::schema::console_session::dsl; - let console_session = diesel::update(dsl::console_session) - .filter(dsl::token.eq(authz_session.id())) - .set((dsl::time_last_used.eq(Utc::now()),)) - .returning(ConsoleSession::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - Error::internal_error(&format!( - "error renewing session: {:?}", - e - )) - })?; - - let (.., db_silo_user) = LookupPath::new(opctx, &self) - .silo_user_id(console_session.silo_user_id) - .fetch() - .await - .map_err(|e| { - Error::internal_error(&format!( - "error fetching silo id: {:?}", - e - )) - })?; - - Ok(authn::ConsoleSessionWithSiloId { - console_session, - silo_id: db_silo_user.silo_id, - }) - } - - // putting "hard" in the name because we don't do this with any other model - pub async fn session_hard_delete( - &self, - opctx: &OpContext, - authz_session: &authz::ConsoleSession, - ) -> DeleteResult { - // We don't do a typical authz check here. Instead, knowing that every - // user is allowed to delete their own session, the query below filters - // on the session's silo_user_id matching the current actor's id. - // - // We could instead model this more like other authz checks. That would - // involve fetching the session record from the database, storing the - // associated silo_user_id into the `authz::ConsoleSession`, and having - // an Oso rule saying you can delete a session whose associated silo - // user matches the authenticated actor. This would be a fair bit more - // complicated and more work at runtime than what we're doing here. - // The tradeoff is that we're effectively encoding policy here, but it - // seems worth it in this case. - let actor = opctx - .authn - .actor_required() - .internal_context("deleting current user's session")?; - - // This check shouldn't be required in that there should be no overlap - // between silo user ids and other types of identity ids. But it's easy - // to check, and if we add another type of Actor, we'll be forced here - // to consider if they should be able to have console sessions and log - // out of them.
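// The policy lives in the query itself: as illustrative SQL, the delete
// below is scoped to the caller's own sessions.
const _SKETCH_SESSION_DELETE_SQL: &str = "
    DELETE FROM console_session
     WHERE silo_user_id = $1 -- the authenticated actor's id
       AND token = $2        -- the session being deleted
";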
- let silo_user_id = match actor.actor_type() { - IdentityType::SiloUser => actor.actor_id(), - IdentityType::UserBuiltin => { - return Err(Error::invalid_request("not a Silo user")) - } - }; - - use db::schema::console_session::dsl; - diesel::delete(dsl::console_session) - .filter(dsl::silo_user_id.eq(silo_user_id)) - .filter(dsl::token.eq(authz_session.id())) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map(|_rows_deleted| ()) - .map_err(|e| { - Error::internal_error(&format!( - "error deleting session: {:?}", - e - )) - }) - } - - pub async fn silo_users_list_by_id( - &self, - opctx: &OpContext, - authz_silo: &authz::Silo, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - use db::schema::silo_user::dsl; - - opctx.authorize(authz::Action::Read, authz_silo).await?; - paginated(dsl::silo_user, dsl::id, pagparams) - .filter(dsl::silo_id.eq(authz_silo.id())) - .filter(dsl::time_deleted.is_null()) - .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn users_builtin_list_by_name( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - use db::schema::user_builtin::dsl; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - paginated(dsl::user_builtin, dsl::name, pagparams) - .select(UserBuiltin::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - /// Load built-in users into the database - pub async fn load_builtin_users( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - use db::schema::user_builtin::dsl; - - opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; - - let builtin_users = [ - // Note: "db_init" is also a builtin user, but that one by necessity - // is created with the database. - &*authn::USER_SERVICE_BALANCER, - &*authn::USER_INTERNAL_API, - &*authn::USER_INTERNAL_READ, - &*authn::USER_EXTERNAL_AUTHN, - &*authn::USER_SAGA_RECOVERY, - ] - .iter() - .map(|u| { - UserBuiltin::new( - u.id, - params::UserBuiltinCreate { - identity: IdentityMetadataCreateParams { - name: u.name.clone(), - description: String::from(u.description), - }, - }, - ) - }) - .collect::>(); - - debug!(opctx.log, "attempting to create built-in users"); - let count = diesel::insert_into(dsl::user_builtin) - .values(builtin_users) - .on_conflict(dsl::id) - .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - info!(opctx.log, "created {} built-in users", count); - - Ok(()) - } - - /// Load the testing users into the database - pub async fn load_silo_users( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - use db::schema::silo_user::dsl; - - opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; - - let users = - [&*authn::USER_TEST_PRIVILEGED, &*authn::USER_TEST_UNPRIVILEGED]; - - debug!(opctx.log, "attempting to create silo users"); - let count = diesel::insert_into(dsl::silo_user) - .values(users) - .on_conflict(dsl::id) - .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - info!(opctx.log, "created {} silo users", count); - - Ok(()) - } - - /// Load role assignments for the test users into the database - pub async fn load_silo_user_role_assignments( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - use db::schema::role_assignment::dsl; - debug!(opctx.log, "attempting to create silo user role assignments"); - let count = diesel::insert_into(dsl::role_assignment) - .values(&*db::fixed_data::silo_user::ROLE_ASSIGNMENTS_PRIVILEGED) - .on_conflict(( - dsl::identity_type, - dsl::identity_id, - dsl::resource_type, - dsl::resource_id, - dsl::role_name, - )) - .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - info!(opctx.log, "created {} silo user role assignments", count); - - Ok(()) - } - - /// List built-in roles - pub async fn roles_builtin_list_by_name( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, (String, String)>, - ) -> ListResultVec { - use db::schema::role_builtin::dsl; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - paginated_multicolumn( - dsl::role_builtin, - (dsl::resource_type, dsl::role_name), - pagparams, - ) - .select(RoleBuiltin::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - /// Load built-in roles into the database - pub async fn load_builtin_roles( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - use db::schema::role_builtin::dsl; - - opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; - - let builtin_roles = BUILTIN_ROLES - .iter() - .map(|role_config| { - RoleBuiltin::new( - role_config.resource_type, - &role_config.role_name, - &role_config.description, - ) - }) - .collect::>(); - - debug!(opctx.log, "attempting to create built-in roles"); - let count = diesel::insert_into(dsl::role_builtin) - .values(builtin_roles) - .on_conflict((dsl::resource_type, dsl::role_name)) - .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - info!(opctx.log, "created {} built-in roles", count); - Ok(()) - } - - /// Load role assignments for built-in users and built-in roles into the - /// database - pub async fn load_builtin_role_asgns( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - use db::schema::role_assignment::dsl; - - opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; - - debug!(opctx.log, "attempting to create built-in role assignments"); - let count = diesel::insert_into(dsl::role_assignment) - .values(&*BUILTIN_ROLE_ASSIGNMENTS) - .on_conflict(( - dsl::identity_type, - dsl::identity_id, - dsl::resource_type, - dsl::resource_id, - dsl::role_name, - )) - .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) 
- .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - info!(opctx.log, "created {} built-in role assignments", count); - Ok(()) - } - - /// Return the built-in roles that the given built-in user has for the given - /// resource - pub async fn role_asgn_list_for( - &self, - opctx: &OpContext, - identity_type: IdentityType, - identity_id: Uuid, - resource_type: ResourceType, - resource_id: Uuid, - ) -> Result, Error> { - use db::schema::role_assignment::dsl; - - // There is no resource-specific authorization check because all - // authenticated users need to be able to list their own roles -- - // otherwise we can't do any authorization checks. - // TODO-security rethink this -- how do we know the user is looking up - // their own roles? Maybe this should use an internal authz context. - - // TODO-scalability TODO-security This needs to be paginated. It's not - // exposed via an external API right now but someone could still put us - // into some hurt by assigning loads of roles to someone and having that - // person attempt to access anything. - dsl::role_assignment - .filter(dsl::identity_type.eq(identity_type)) - .filter(dsl::identity_id.eq(identity_id)) - .filter(dsl::resource_type.eq(resource_type.to_string())) - .filter(dsl::resource_id.eq(resource_id)) - .select(RoleAssignment::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn update_available_artifact_upsert( - &self, - opctx: &OpContext, - artifact: UpdateAvailableArtifact, - ) -> CreateResult { - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - - use db::schema::update_available_artifact::dsl; - diesel::insert_into(dsl::update_available_artifact) - .values(artifact.clone()) - .on_conflict((dsl::name, dsl::version, dsl::kind)) - .do_update() - .set(artifact.clone()) - .returning(UpdateAvailableArtifact::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn update_available_artifact_hard_delete_outdated( - &self, - opctx: &OpContext, - current_targets_role_version: i64, - ) -> DeleteResult { - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - - // We use the `targets_role_version` column in the table to delete any - // old rows, keeping the table in sync with the current copy of - // artifacts.json. - use db::schema::update_available_artifact::dsl; - diesel::delete(dsl::update_available_artifact) - .filter(dsl::targets_role_version.lt(current_targets_role_version)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map(|_rows_deleted| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - .internal_context("deleting outdated available artifacts") - } - - /// Create a silo user - pub async fn silo_user_create( - &self, - silo_user: SiloUser, - ) -> Result { - use db::schema::silo_user::dsl; - - let silo_user_external_id = silo_user.external_id.clone(); - diesel::insert_into(dsl::silo_user) - .values(silo_user) - .returning(SiloUser::as_returning()) - .get_result_async(self.pool()) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::SiloUser, - &silo_user_external_id, - ), - ) - }) - } - - /// Given an external ID, return - /// - Ok(Some(SiloUser)) if that external id refers to an existing silo user - /// - Ok(None) if it does not - /// - Err(...) 
if there was an error doing this lookup. - pub async fn silo_user_fetch_by_external_id( - &self, - opctx: &OpContext, - authz_silo: &authz::Silo, - external_id: &str, - ) -> Result, Error> { - opctx.authorize(authz::Action::ListChildren, authz_silo).await?; - - use db::schema::silo_user::dsl; - - Ok(dsl::silo_user - .filter(dsl::silo_id.eq(authz_silo.id())) - .filter(dsl::external_id.eq(external_id.to_string())) - .filter(dsl::time_deleted.is_null()) - .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SiloUser, - LookupType::ByName(external_id.to_string()), - ), - ) - })? - .pop()) - } - - /// Load built-in silos into the database - pub async fn load_builtin_silos( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; - - debug!(opctx.log, "attempting to create built-in silo"); - - use db::schema::silo::dsl; - let count = diesel::insert_into(dsl::silo) - .values(&*DEFAULT_SILO) - .on_conflict(dsl::id) - .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; - info!(opctx.log, "created {} built-in silos", count); - Ok(()) - } - - pub async fn silo_create( - &self, - opctx: &OpContext, - silo: Silo, - ) -> CreateResult { - opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - - let silo_id = silo.id(); - - use db::schema::silo::dsl; - diesel::insert_into(dsl::silo) - .values(silo) - .returning(Silo::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Silo, - silo_id.to_string().as_str(), - ), - ) - }) - } - - pub async fn silos_list_by_id( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::silo::dsl; - paginated(dsl::silo, dsl::id, pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::discoverable.eq(true)) - .select(Silo::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn silos_list_by_name( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - - use db::schema::silo::dsl; - paginated(dsl::silo, dsl::name, pagparams) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::discoverable.eq(true)) - .select(Silo::as_select()) - .load_async::(self.pool_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) - } - - pub async fn silo_delete( - &self, - opctx: &OpContext, - authz_silo: &authz::Silo, - db_silo: &db::model::Silo, - ) -> DeleteResult { - assert_eq!(authz_silo.id(), db_silo.id()); - opctx.authorize(authz::Action::Delete, authz_silo).await?; - - use db::schema::organization; - use db::schema::silo; - use db::schema::silo_user; - - // Make sure there are no organizations present within this silo. 
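// The deletion below is an optimistic concurrency check: the UPDATE
// matches only if the silo's child-resource generation (`rcgen`) still
// holds the value read alongside `db_silo`, so a concurrent organization
// create (which bumps `rcgen`) yields zero updated rows and an error
// rather than a silo deleted out from under its children. As illustrative
// SQL:
const _SKETCH_SILO_DELETE_SQL: &str = "
    UPDATE silo SET time_deleted = now()
     WHERE id = $1 AND time_deleted IS NULL AND rcgen = $2";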
- let id = authz_silo.id(); - let rcgen = db_silo.rcgen; - let org_found = diesel_pool_result_optional( - organization::dsl::organization - .filter(organization::dsl::silo_id.eq(id)) - .filter(organization::dsl::time_deleted.is_null()) - .select(organization::dsl::id) - .limit(1) - .first_async::(self.pool_authorized(opctx).await?) - .await, - ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; - - if org_found.is_some() { - return Err(Error::InvalidRequest { - message: "silo to be deleted contains an organization" - .to_string(), - }); - } - - let now = Utc::now(); - let updated_rows = diesel::update(silo::dsl::silo) - .filter(silo::dsl::time_deleted.is_null()) - .filter(silo::dsl::id.eq(id)) - .filter(silo::dsl::rcgen.eq(rcgen)) - .set(silo::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - - if updated_rows == 0 { - return Err(Error::InvalidRequest { - message: "silo deletion failed due to concurrent modification" - .to_string(), - }); - } - - info!(opctx.log, "deleted silo {}", id); - - // If silo deletion succeeded, delete all silo users - // TODO-correctness This needs to happen in a saga or some other - // mechanism that ensures it happens even if we crash at this point. - // TODO-scalability This needs to happen in batches - let updated_rows = diesel::update(silo_user::dsl::silo_user) - .filter(silo_user::dsl::silo_id.eq(id)) - .filter(silo_user::dsl::time_deleted.is_null()) - .set(silo_user::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - - info!(opctx.log, "deleted {} silo users for silo {}", updated_rows, id); - - // delete all silo identity providers - use db::schema::identity_provider::dsl as idp_dsl; - - let updated_rows = diesel::update(idp_dsl::identity_provider) - .filter(idp_dsl::silo_id.eq(id)) - .filter(idp_dsl::time_deleted.is_null()) - .set(idp_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - - info!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id); - - use db::schema::saml_identity_provider::dsl as saml_idp_dsl; - - let updated_rows = diesel::update(saml_idp_dsl::saml_identity_provider) - .filter(saml_idp_dsl::silo_id.eq(id)) - .filter(saml_idp_dsl::time_deleted.is_null()) - .set(saml_idp_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) - .await - .map_err(|e| { - public_error_from_diesel_pool( - e, - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - - info!( - opctx.log, - "deleted {} silo saml IdPs for silo {}", updated_rows, id - ); - - Ok(()) - } - - pub async fn identity_provider_list( - &self, - opctx: &OpContext, - authz_silo: &authz::Silo, - pagparams: &DataPageParams<'_, Name>, - ) -> ListResultVec { - opctx - .authorize(authz::Action::ListIdentityProviders, authz_silo) - .await?; - - use db::schema::identity_provider::dsl; - paginated(dsl::identity_provider, dsl::name, pagparams) - .filter(dsl::silo_id.eq(authz_silo.id())) - .filter(dsl::time_deleted.is_null()) - .select(IdentityProvider::as_select()) - .load_async::(self.pool_authorized(opctx).await?) 
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    pub async fn saml_identity_provider_create(
-        &self,
-        opctx: &OpContext,
-        authz_silo: &authz::Silo,
-        provider: db::model::SamlIdentityProvider,
-    ) -> CreateResult<db::model::SamlIdentityProvider> {
-        opctx.authorize(authz::Action::CreateChild, authz_silo).await?;
-
-        let name = provider.identity().name.to_string();
-        self.pool_authorized(opctx)
-            .await?
-            .transaction(move |conn| {
-                // insert silo identity provider record with type Saml
-                use db::schema::identity_provider::dsl as idp_dsl;
-                diesel::insert_into(idp_dsl::identity_provider)
-                    .values(db::model::IdentityProvider {
-                        identity: db::model::IdentityProviderIdentity {
-                            id: provider.identity.id,
-                            name: provider.identity.name.clone(),
-                            description: provider.identity.description.clone(),
-                            time_created: provider.identity.time_created,
-                            time_modified: provider.identity.time_modified,
-                            time_deleted: provider.identity.time_deleted,
-                        },
-                        silo_id: provider.silo_id,
-                        provider_type: db::model::IdentityProviderType::Saml,
-                    })
-                    .execute(conn)?;
-
-                // insert silo saml identity provider record
-                use db::schema::saml_identity_provider::dsl;
-                let result = diesel::insert_into(dsl::saml_identity_provider)
-                    .values(provider)
-                    .returning(db::model::SamlIdentityProvider::as_returning())
-                    .get_result(conn)?;
-
-                Ok(result)
-            })
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::SamlIdentityProvider,
-                        &name,
-                    ),
-                )
-            })
-    }
-
-    /// Return the next available IPv6 address for an Oxide service running on
-    /// the provided sled.
-    pub async fn next_ipv6_address(
-        &self,
-        opctx: &OpContext,
-        sled_id: Uuid,
-    ) -> Result<Ipv6Addr, Error> {
-        use db::schema::sled::dsl;
-        let net = diesel::update(
-            dsl::sled.find(sled_id).filter(dsl::time_deleted.is_null()),
-        )
-        .set(dsl::last_used_address.eq(dsl::last_used_address + 1))
-        .returning(dsl::last_used_address)
-        .get_result_async(self.pool_authorized(opctx).await?)
-        .await
-        .map_err(|e| {
-            public_error_from_diesel_pool(
-                e,
-                ErrorHandler::NotFoundByLookup(
-                    ResourceType::Sled,
-                    LookupType::ById(sled_id),
-                ),
-            )
-        })?;
-
-        // TODO-correctness: We need to ensure that this address is actually
-        // within the sled's underlay prefix, once that's included in the
-        // database record.
-        match net {
-            ipnetwork::IpNetwork::V6(net) => Ok(net.ip()),
-            _ => Err(Error::InternalError {
-                internal_message: String::from("Sled IP address must be IPv6"),
-            }),
-        }
-    }
-
-    pub async fn global_image_list_images(
-        &self,
-        opctx: &OpContext,
-        pagparams: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<GlobalImage> {
-        opctx
-            .authorize(authz::Action::ListChildren, &authz::GLOBAL_IMAGE_LIST)
-            .await?;
-
-        use db::schema::global_image::dsl;
-        paginated(dsl::global_image, dsl::name, pagparams)
-            .filter(dsl::time_deleted.is_null())
-            .select(GlobalImage::as_select())
-            .load_async::<GlobalImage>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    pub async fn global_image_create_image(
-        &self,
-        opctx: &OpContext,
-        image: GlobalImage,
-    ) -> CreateResult<GlobalImage> {
-        opctx
-            .authorize(authz::Action::CreateChild, &authz::GLOBAL_IMAGE_LIST)
-            .await?;
-
-        use db::schema::global_image::dsl;
-        let name = image.name().clone();
-        diesel::insert_into(dsl::global_image)
-            .values(image)
-            .on_conflict(dsl::id)
-            .do_nothing()
-            .returning(GlobalImage::as_returning())
-            .get_result_async(self.pool())
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(ResourceType::Image, name.as_str()),
-                )
-            })
-    }
-
-    // SSH public keys
-
-    pub async fn ssh_keys_list(
-        &self,
-        opctx: &OpContext,
-        authz_user: &authz::SiloUser,
-        page_params: &DataPageParams<'_, Name>,
-    ) -> ListResultVec<SshKey> {
-        opctx.authorize(authz::Action::ListChildren, authz_user).await?;
-
-        use db::schema::ssh_key::dsl;
-        paginated(dsl::ssh_key, dsl::name, page_params)
-            .filter(dsl::silo_user_id.eq(authz_user.id()))
-            .filter(dsl::time_deleted.is_null())
-            .select(SshKey::as_select())
-            .load_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Create a new SSH public key for a user.
-    pub async fn ssh_key_create(
-        &self,
-        opctx: &OpContext,
-        authz_user: &authz::SiloUser,
-        ssh_key: SshKey,
-    ) -> CreateResult<SshKey> {
-        assert_eq!(authz_user.id(), ssh_key.silo_user_id);
-        opctx.authorize(authz::Action::CreateChild, authz_user).await?;
-        let name = ssh_key.name().to_string();
-
-        use db::schema::ssh_key::dsl;
-        diesel::insert_into(dsl::ssh_key)
-            .values(ssh_key)
-            .returning(SshKey::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::Conflict(ResourceType::SshKey, &name),
-                )
-            })
-    }
-
-    /// Delete an existing SSH public key.
-    pub async fn ssh_key_delete(
-        &self,
-        opctx: &OpContext,
-        authz_ssh_key: &authz::SshKey,
-    ) -> DeleteResult {
-        opctx.authorize(authz::Action::Delete, authz_ssh_key).await?;
-
-        use db::schema::ssh_key::dsl;
-        diesel::update(dsl::ssh_key)
-            .filter(dsl::id.eq(authz_ssh_key.id()))
-            .filter(dsl::time_deleted.is_null())
-            .set(dsl::time_deleted.eq(Utc::now()))
-            .check_if_exists::<SshKey>(authz_ssh_key.id())
-            .execute_and_check(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByResource(authz_ssh_key),
-                )
-            })?;
-        Ok(())
-    }
-
-    // Role assignments
-
-    /// Fetches all of the externally-visible role assignments for the specified
-    /// resource
-    ///
-    /// Role assignments for internal identities (e.g., built-in users) are not
-    /// included in this list.
-    ///
-    /// This function is generic over all resources that can accept roles (e.g.,
-    /// Fleet, Silo, Organization, etc.).
-    // TODO-scalability In an ideal world, this would be paginated. The impact
-    // is mitigated because we cap the number of role assignments per resource
-    // pretty tightly.
-    pub async fn role_assignment_fetch_visible<
-        T: authz::ApiResourceWithRoles + Clone,
-    >(
-        &self,
-        opctx: &OpContext,
-        authz_resource: &T,
-    ) -> ListResultVec<RoleAssignment> {
-        opctx.authorize(authz::Action::ReadPolicy, authz_resource).await?;
-        let resource_type = authz_resource.resource_type();
-        let resource_id = authz_resource.resource_id();
-        use db::schema::role_assignment::dsl;
-        dsl::role_assignment
-            .filter(dsl::resource_type.eq(resource_type.to_string()))
-            .filter(dsl::resource_id.eq(resource_id))
-            .filter(dsl::identity_type.ne(IdentityType::UserBuiltin))
-            .order(dsl::role_name.asc())
-            .then_order_by(dsl::identity_id.asc())
-            .select(RoleAssignment::as_select())
-            .load_async::<RoleAssignment>(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Removes all existing externally-visible role assignments on
-    /// `authz_resource` and adds those specified by `new_assignments`
-    ///
-    /// Role assignments for internal identities (e.g., built-in users) are not
-    /// affected.
-    ///
-    /// The expectation is that the caller will have just fetched the role
-    /// assignments, modified them, and is giving us the complete new list.
-    ///
-    /// This function is generic over all resources that can accept roles (e.g.,
-    /// Fleet, Silo, Organization, etc.).
-    // TODO-correctness As with the rest of the API, we're lacking an ability
-    // for an ETag precondition check here.
-    // TODO-scalability In an ideal world, this would update in batches. That's
-    // tricky without first-classing the Policy in the database. The impact is
-    // mitigated because we cap the number of role assignments per resource
-    // pretty tightly.
-    pub async fn role_assignment_replace_visible<T>(
-        &self,
-        opctx: &OpContext,
-        authz_resource: &T,
-        new_assignments: &[shared::RoleAssignment],
-    ) -> ListResultVec<RoleAssignment>
-    where
-        T: authz::ApiResourceWithRolesType + Clone,
-    {
-        // TODO-security We should carefully review what permissions are
-        // required for modifying the policy of a resource.
-        opctx.authorize(authz::Action::ModifyPolicy, authz_resource).await?;
-        bail_unless!(
-            new_assignments.len() <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE
-        );
-
-        let resource_type = authz_resource.resource_type();
-        let resource_id = authz_resource.resource_id();
-
-        // Sort the records in the same order that we would return them when
-        // listing them. This is because we're going to use RETURNING to return
-        // the inserted rows from the database and we want them to come back in
-        // the same order that we would normally list them.
-        let mut new_assignments = new_assignments
-            .iter()
-            .map(|r| {
-                db::model::RoleAssignment::new(
-                    db::model::IdentityType::from(r.identity_type),
-                    r.identity_id,
-                    resource_type,
-                    resource_id,
-                    &r.role_name.to_database_string(),
-                )
-            })
-            .collect::<Vec<_>>();
-        new_assignments.sort_by(|r1, r2| {
-            (&r1.role_name, r1.identity_id)
-                .cmp(&(&r2.role_name, r2.identity_id))
-        });
-
-        use db::schema::role_assignment::dsl;
-        let delete_old_query = diesel::delete(dsl::role_assignment)
-            .filter(dsl::resource_id.eq(resource_id))
-            .filter(dsl::resource_type.eq(resource_type.to_string()))
-            .filter(dsl::identity_type.ne(IdentityType::UserBuiltin));
-        let insert_new_query = diesel::insert_into(dsl::role_assignment)
-            .values(new_assignments)
-            .returning(RoleAssignment::as_returning());
-
-        // TODO-scalability: Ideally this would be a batched transaction so we
-        // don't need to hold a transaction open across multiple roundtrips from
-        // the database, but for now we're using a transaction due to the
-        // severely decreased legibility of CTEs via diesel right now.
-        // We might instead want to first-class the idea of Policies in the
-        // database so that we can build up a whole new Policy in batches and
-        // then flip the resource over to using it.
-        self.pool_authorized(opctx)
-            .await?
-            .transaction(move |conn| {
-                delete_old_query.execute(conn)?;
-                Ok(insert_new_query.get_results(conn)?)
-            })
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    // OAuth 2.0 Device Authorization Grant
-
-    /// Start a device authorization grant flow by recording the request
-    /// and initial response parameters.
-    pub async fn device_auth_request_create(
-        &self,
-        opctx: &OpContext,
-        auth_request: DeviceAuthRequest,
-    ) -> CreateResult<DeviceAuthRequest> {
-        opctx
-            .authorize(
-                authz::Action::CreateChild,
-                &authz::DEVICE_AUTH_REQUEST_LIST,
-            )
-            .await?;
-
-        use db::schema::device_auth_request::dsl;
-        diesel::insert_into(dsl::device_auth_request)
-            .values(auth_request)
-            .returning(DeviceAuthRequest::as_returning())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
-    }
-
-    /// Remove the device authorization request and create a new device
-    /// access token record. The token may already be expired if the flow
-    /// was not completed in time.
-    pub async fn device_access_token_create(
-        &self,
-        opctx: &OpContext,
-        authz_request: &authz::DeviceAuthRequest,
-        authz_user: &authz::SiloUser,
-        access_token: DeviceAccessToken,
-    ) -> CreateResult<DeviceAccessToken> {
-        assert_eq!(authz_user.id(), access_token.silo_user_id);
-        opctx.authorize(authz::Action::Delete, authz_request).await?;
-        opctx.authorize(authz::Action::CreateChild, authz_user).await?;
-
-        use db::schema::device_auth_request::dsl as request_dsl;
-        let delete_request = diesel::delete(request_dsl::device_auth_request)
-            .filter(request_dsl::user_code.eq(authz_request.id()));
-
-        use db::schema::device_access_token::dsl as token_dsl;
-        let insert_token = diesel::insert_into(token_dsl::device_access_token)
-            .values(access_token)
-            .returning(DeviceAccessToken::as_returning());
-
-        #[derive(Debug)]
-        enum TokenGrantError {
-            RequestNotFound,
-            TooManyRequests,
-        }
-        type TxnError = TransactionError<TokenGrantError>;
-
-        self.pool_authorized(opctx)
-            .await?
-            .transaction(move |conn| match delete_request.execute(conn)? {
-                0 => {
-                    Err(TxnError::CustomError(TokenGrantError::RequestNotFound))
-                }
-                1 => Ok(insert_token.get_result(conn)?),
-                _ => Err(TxnError::CustomError(
-                    TokenGrantError::TooManyRequests,
-                )),
-            })
-            .await
-            .map_err(|e| match e {
-                TxnError::CustomError(TokenGrantError::RequestNotFound) => {
-                    Error::ObjectNotFound {
-                        type_name: ResourceType::DeviceAuthRequest,
-                        lookup_type: LookupType::ByCompositeId(
-                            authz_request.id(),
-                        ),
-                    }
-                }
-                TxnError::CustomError(TokenGrantError::TooManyRequests) => {
-                    Error::internal_error("unexpectedly found multiple device auth requests for the same user code")
-                }
-                TxnError::Pool(e) => {
-                    public_error_from_diesel_pool(e, ErrorHandler::Server)
-                }
-            })
-    }
-
-    /// Look up a granted device access token.
-    /// Note: since this lookup is not by a primary key or name,
-    /// (though it does use a unique index), it does not fit the
-    /// usual lookup machinery pattern. It therefore does not include
-    /// any authz checks. However, the device code is a single-use
-    /// high-entropy random token, and so should not be guessable
-    /// by an attacker.
-    pub async fn device_access_token_fetch(
-        &self,
-        opctx: &OpContext,
-        client_id: Uuid,
-        device_code: String,
-    ) -> LookupResult<DeviceAccessToken> {
-        use db::schema::device_access_token::dsl;
-        dsl::device_access_token
-            .filter(dsl::client_id.eq(client_id))
-            .filter(dsl::device_code.eq(device_code))
-            .select(DeviceAccessToken::as_select())
-            .get_result_async(self.pool_authorized(opctx).await?)
-            .await
-            .map_err(|e| {
-                public_error_from_diesel_pool(
-                    e,
-                    ErrorHandler::NotFoundByLookup(
-                        ResourceType::DeviceAccessToken,
-                        LookupType::ByCompositeId(
-                            "client_id, device_code".to_string(),
-                        ),
-                    ),
-                )
-            })
-    }
-
-    // Test interfaces
-
-    #[cfg(test)]
-    async fn test_try_table_scan(&self, opctx: &OpContext) -> Error {
-        use db::schema::project::dsl;
-        let conn = self.pool_authorized(opctx).await;
-        if let Err(error) = conn {
-            return error;
-        }
-        let result = dsl::project
-            .select(diesel::dsl::count_star())
-            .first_async::<i64>(conn.unwrap())
-            .await;
-        match result {
-            Ok(_) => Error::internal_error("table scan unexpectedly succeeded"),
-            Err(error) => {
-                public_error_from_diesel_pool(error, ErrorHandler::Server)
-            }
-        }
-    }
-}
-
-/// Constructs a DataStore for use in test suites that has preloaded the
-/// built-in users, roles, and role assignments that are needed for basic
-/// operation
-#[cfg(test)]
-pub async fn datastore_test(
-    logctx: &dropshot::test_util::LogContext,
-    db: &omicron_test_utils::dev::db::CockroachInstance,
-) -> (OpContext, Arc<DataStore>) {
-    let cfg = db::Config { url: db.pg_config().clone() };
-    let pool = Arc::new(db::Pool::new(&cfg));
-    let datastore = Arc::new(DataStore::new(pool));
-
-    // Create an OpContext with the credentials of "db-init" just for the
-    // purpose of loading the built-in users, roles, and assignments.
-    let opctx = OpContext::for_background(
-        logctx.log.new(o!()),
-        Arc::new(authz::Authz::new(&logctx.log)),
-        authn::Context::internal_db_init(),
-        Arc::clone(&datastore),
-    );
-    datastore.load_builtin_users(&opctx).await.unwrap();
-    datastore.load_builtin_roles(&opctx).await.unwrap();
-    datastore.load_builtin_role_asgns(&opctx).await.unwrap();
-    datastore.load_builtin_silos(&opctx).await.unwrap();
-    datastore.load_silo_users(&opctx).await.unwrap();
-    datastore.load_silo_user_role_assignments(&opctx).await.unwrap();
-
-    // Create an OpContext with the credentials of "test-privileged" for
-    // general testing.
- let opctx = - OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); - - (opctx, datastore) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::authz; - use crate::db::explain::ExplainableAsync; - use crate::db::fixed_data::silo::SILO_ID; - use crate::db::identity::Resource; - use crate::db::lookup::LookupPath; - use crate::db::model::InstanceExternalIp; - use crate::db::model::{ConsoleSession, DatasetKind, Project, ServiceKind}; - use crate::external_api::params; - use chrono::{Duration, Utc}; - use nexus_test_utils::db::test_setup_database; - use omicron_common::api::external::{ - ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, - }; - use omicron_test_utils::dev; - use std::collections::HashSet; - use std::net::Ipv6Addr; - use std::net::SocketAddrV6; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use uuid::Uuid; - - #[tokio::test] - async fn test_project_creation() { - let logctx = dev::test_setup_log("test_project_creation"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - let organization = params::OrganizationCreate { - identity: IdentityMetadataCreateParams { - name: "org".parse().unwrap(), - description: "desc".to_string(), - }, - }; - - let organization = - datastore.organization_create(&opctx, &organization).await.unwrap(); - - let project = Project::new( - organization.id(), - params::ProjectCreate { - identity: IdentityMetadataCreateParams { - name: "project".parse().unwrap(), - description: "desc".to_string(), - }, - }, - ); - let (.., authz_org) = LookupPath::new(&opctx, &datastore) - .organization_id(organization.id()) - .lookup_for(authz::Action::CreateChild) - .await - .unwrap(); - datastore.project_create(&opctx, &authz_org, project).await.unwrap(); - - let (.., organization_after_project_create) = - LookupPath::new(&opctx, &datastore) - .organization_name(organization.name()) - .fetch() - .await - .unwrap(); - assert!(organization_after_project_create.rcgen > organization.rcgen); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn test_session_methods() { - let logctx = dev::test_setup_log("test_session_methods"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - let authn_opctx = OpContext::for_background( - logctx.log.new(o!("component" => "TestExternalAuthn")), - Arc::new(authz::Authz::new(&logctx.log)), - authn::Context::external_authn(), - Arc::clone(&datastore), - ); - - let token = "a_token".to_string(); - let silo_user_id = Uuid::new_v4(); - - let session = ConsoleSession { - token: token.clone(), - time_created: Utc::now() - Duration::minutes(5), - time_last_used: Utc::now() - Duration::minutes(5), - silo_user_id, - }; - - let _ = datastore - .session_create(&authn_opctx, session.clone()) - .await - .unwrap(); - - // Associate silo with user - datastore - .silo_user_create(SiloUser::new( - *SILO_ID, - silo_user_id, - "external_id".into(), - )) - .await - .unwrap(); - - let (.., db_silo_user) = LookupPath::new(&opctx, &datastore) - .silo_user_id(session.silo_user_id) - .fetch() - .await - .unwrap(); - assert_eq!(*SILO_ID, db_silo_user.silo_id); - - // fetch the one we just created - let (.., fetched) = LookupPath::new(&opctx, &datastore) - .console_session_token(&token) - .fetch() - .await - .unwrap(); - assert_eq!(session.silo_user_id, fetched.silo_user_id); - - // trying to insert the same one again fails - let 
duplicate =
-            datastore.session_create(&authn_opctx, session.clone()).await;
-        assert!(matches!(
-            duplicate,
-            Err(Error::InternalError { internal_message: _ })
-        ));
-
-        // update last used (i.e., renew token)
-        let authz_session = authz::ConsoleSession::new(
-            authz::FLEET,
-            token.clone(),
-            LookupType::ByCompositeId(token.clone()),
-        );
-        let renewed = datastore
-            .session_update_last_used(&opctx, &authz_session)
-            .await
-            .unwrap();
-        assert!(
-            renewed.console_session.time_last_used > session.time_last_used
-        );
-
-        // time_last_used change persists in DB
-        let (.., fetched) = LookupPath::new(&opctx, &datastore)
-            .console_session_token(&token)
-            .fetch()
-            .await
-            .unwrap();
-        assert!(fetched.time_last_used > session.time_last_used);
-
-        // deleting it using `opctx` (which represents the test-privileged user)
-        // should succeed but not do anything -- you can't delete someone else's
-        // session
-        let delete =
-            datastore.session_hard_delete(&opctx, &authz_session).await;
-        assert_eq!(delete, Ok(()));
-        let fetched = LookupPath::new(&opctx, &datastore)
-            .console_session_token(&token)
-            .fetch()
-            .await;
-        assert!(fetched.is_ok());
-
-        // delete it and fetch should come back with nothing
-        let silo_user_opctx = OpContext::for_background(
-            logctx.log.new(o!()),
-            Arc::new(authz::Authz::new(&logctx.log)),
-            authn::Context::test_silo_user(*SILO_ID, silo_user_id),
-            Arc::clone(&datastore),
-        );
-        let delete = datastore
-            .session_hard_delete(&silo_user_opctx, &authz_session)
-            .await;
-        assert_eq!(delete, Ok(()));
-        let fetched = LookupPath::new(&opctx, &datastore)
-            .console_session_token(&token)
-            .fetch()
-            .await;
-        assert!(matches!(
-            fetched,
-            Err(Error::ObjectNotFound { type_name: _, lookup_type: _ })
-        ));
-
-        // deleting an already-nonexistent session is considered a success
-        let delete_again =
-            datastore.session_hard_delete(&opctx, &authz_session).await;
-        assert_eq!(delete_again, Ok(()));
-
-        db.cleanup().await.unwrap();
-        logctx.cleanup_successful();
-    }
-
-    // Creates a test sled, returns its UUID.
-    async fn create_test_sled(datastore: &DataStore) -> Uuid {
-        let bogus_addr = SocketAddrV6::new(
-            Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1),
-            8080,
-            0,
-            0,
-        );
-        let rack_id = Uuid::new_v4();
-        let sled_id = Uuid::new_v4();
-        let sled = Sled::new(sled_id, bogus_addr.clone(), rack_id);
-        datastore.sled_upsert(sled).await.unwrap();
-        sled_id
-    }
-
-    fn test_zpool_size() -> ByteCount {
-        ByteCount::from_gibibytes_u32(100)
-    }
-
-    // Creates a test zpool, returns its UUID.
-    async fn create_test_zpool(datastore: &DataStore, sled_id: Uuid) -> Uuid {
-        let zpool_id = Uuid::new_v4();
-        let zpool = Zpool::new(
-            zpool_id,
-            sled_id,
-            &crate::internal_api::params::ZpoolPutRequest {
-                size: test_zpool_size(),
-            },
-        );
-        datastore.zpool_upsert(zpool).await.unwrap();
-        zpool_id
-    }
-
-    fn create_test_disk_create_params(
-        name: &str,
-        size: ByteCount,
-    ) -> params::DiskCreate {
-        params::DiskCreate {
-            identity: IdentityMetadataCreateParams {
-                name: Name::try_from(name.to_string()).unwrap(),
-                description: name.to_string(),
-            },
-            disk_source: params::DiskSource::Blank {
-                block_size: params::BlockSize::try_from(4096).unwrap(),
-            },
-            size,
-        }
-    }
-
-    #[tokio::test]
-    async fn test_region_allocation() {
-        let logctx = dev::test_setup_log("test_region_allocation");
-        let mut db = test_setup_database(&logctx.log).await;
-        let cfg = db::Config { url: db.pg_config().clone() };
-        let pool = db::Pool::new(&cfg);
-        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
-        let opctx =
-            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
-
-        // Create a sled...
-        let sled_id = create_test_sled(&datastore).await;
-
-        // ... and a zpool within that sled...
-        let zpool_id = create_test_zpool(&datastore, sled_id).await;
-
-        // ... and datasets within that zpool.
-        let dataset_count = REGION_REDUNDANCY_THRESHOLD * 2;
-        let bogus_addr =
-            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
-        let dataset_ids: Vec<Uuid> =
-            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
-        for id in &dataset_ids {
-            let dataset =
-                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
-            datastore.dataset_upsert(dataset).await.unwrap();
-        }
-
-        // Allocate regions from the datasets for this disk.
-        let params = create_test_disk_create_params(
-            "disk1",
-            ByteCount::from_mebibytes_u32(500),
-        );
-        let volume1_id = Uuid::new_v4();
-        // Currently, we only allocate one Region Set per volume.
-        let expected_region_count = REGION_REDUNDANCY_THRESHOLD;
-        let dataset_and_regions = datastore
-            .region_allocate(&opctx, volume1_id, &params)
-            .await
-            .unwrap();
-
-        // Verify the allocation.
-        assert_eq!(expected_region_count, dataset_and_regions.len());
-        let mut disk1_datasets = HashSet::new();
-        for (dataset, region) in dataset_and_regions {
-            assert!(disk1_datasets.insert(dataset.id()));
-            assert_eq!(volume1_id, region.volume_id());
-            assert_eq!(ByteCount::from(4096), region.block_size());
-            assert_eq!(params.extent_size() / 4096, region.blocks_per_extent());
-            assert_eq!(params.extent_count(), region.extent_count());
-        }
-
-        // Allocate regions for a second disk. Observe that we allocate from
-        // the three previously unused datasets.
-        let params = create_test_disk_create_params(
-            "disk2",
-            ByteCount::from_mebibytes_u32(500),
-        );
-        let volume2_id = Uuid::new_v4();
-        let dataset_and_regions = datastore
-            .region_allocate(&opctx, volume2_id, &params)
-            .await
-            .unwrap();
-        assert_eq!(expected_region_count, dataset_and_regions.len());
-        let mut disk2_datasets = HashSet::new();
-        for (dataset, region) in dataset_and_regions {
-            assert!(disk2_datasets.insert(dataset.id()));
-            assert_eq!(volume2_id, region.volume_id());
-            assert_eq!(ByteCount::from(4096), region.block_size());
-            assert_eq!(params.extent_size() / 4096, region.blocks_per_extent());
-            assert_eq!(params.extent_count(), region.extent_count());
-        }
-
-        // Double-check that the datasets used for the first disk weren't
-        // used when allocating the second disk.
-        assert_eq!(0, disk1_datasets.intersection(&disk2_datasets).count());
-
-        let _ = db.cleanup().await;
-        logctx.cleanup_successful();
-    }
-
-    #[tokio::test]
-    async fn test_region_allocation_is_idempotent() {
-        let logctx =
-            dev::test_setup_log("test_region_allocation_is_idempotent");
-        let mut db = test_setup_database(&logctx.log).await;
-        let cfg = db::Config { url: db.pg_config().clone() };
-        let pool = db::Pool::new(&cfg);
-        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
-        let opctx =
-            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
-
-        // Create a sled...
-        let sled_id = create_test_sled(&datastore).await;
-
-        // ... and a zpool within that sled...
-        let zpool_id = create_test_zpool(&datastore, sled_id).await;
-
-        // ... and datasets within that zpool.
-        let dataset_count = REGION_REDUNDANCY_THRESHOLD;
-        let bogus_addr =
-            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
-        let dataset_ids: Vec<Uuid> =
-            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
-        for id in &dataset_ids {
-            let dataset =
-                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
-            datastore.dataset_upsert(dataset).await.unwrap();
-        }
-
-        // Allocate regions from the datasets for this volume.
-        let params = create_test_disk_create_params(
-            "disk",
-            ByteCount::from_mebibytes_u32(500),
-        );
-        let volume_id = Uuid::new_v4();
-        let mut dataset_and_regions1 = datastore
-            .region_allocate(&opctx, volume_id, &params)
-            .await
-            .unwrap();
-        let mut dataset_and_regions2 = datastore
-            .region_allocate(&opctx, volume_id, &params)
-            .await
-            .unwrap();
-
-        // Give them a consistent order so we can easily compare them.
-        let sort_vec = |v: &mut Vec<(Dataset, Region)>| {
-            v.sort_by(|(d1, r1), (d2, r2)| {
-                let order = d1.id().cmp(&d2.id());
-                match order {
-                    std::cmp::Ordering::Equal => r1.id().cmp(&r2.id()),
-                    _ => order,
-                }
-            });
-        };
-        sort_vec(&mut dataset_and_regions1);
-        sort_vec(&mut dataset_and_regions2);
-
-        // Validate that the two calls to allocate return the same data.
-        assert_eq!(dataset_and_regions1.len(), dataset_and_regions2.len());
-        for i in 0..dataset_and_regions1.len() {
-            assert_eq!(dataset_and_regions1[i], dataset_and_regions2[i]);
-        }
-
-        let _ = db.cleanup().await;
-        logctx.cleanup_successful();
-    }
-
-    #[tokio::test]
-    async fn test_region_allocation_not_enough_datasets() {
-        let logctx =
-            dev::test_setup_log("test_region_allocation_not_enough_datasets");
-        let mut db = test_setup_database(&logctx.log).await;
-        let cfg = db::Config { url: db.pg_config().clone() };
-        let pool = db::Pool::new(&cfg);
-        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
-        let opctx =
-            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
-
-        // Create a sled...
-        let sled_id = create_test_sled(&datastore).await;
-
-        // ... and a zpool within that sled...
-        let zpool_id = create_test_zpool(&datastore, sled_id).await;
-
-        // ... and datasets within that zpool.
-        let dataset_count = REGION_REDUNDANCY_THRESHOLD - 1;
-        let bogus_addr =
-            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
-        let dataset_ids: Vec<Uuid> =
-            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
-        for id in &dataset_ids {
-            let dataset =
-                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
-            datastore.dataset_upsert(dataset).await.unwrap();
-        }
-
-        // Allocate regions from the datasets for this volume.
-        let params = create_test_disk_create_params(
-            "disk1",
-            ByteCount::from_mebibytes_u32(500),
-        );
-        let volume1_id = Uuid::new_v4();
-        let err = datastore
-            .region_allocate(&opctx, volume1_id, &params)
-            .await
-            .unwrap_err();
-        assert!(err
-            .to_string()
-            .contains("Not enough datasets to allocate disks"));
-
-        assert!(matches!(err, Error::ServiceUnavailable { .. }));
-
-        let _ = db.cleanup().await;
-        logctx.cleanup_successful();
-    }
-
-    // TODO: This test should be updated when the correct handling
-    // of this out-of-space case is implemented.
-    #[tokio::test]
-    async fn test_region_allocation_out_of_space_does_not_fail_yet() {
-        let logctx = dev::test_setup_log(
-            "test_region_allocation_out_of_space_does_not_fail_yet",
-        );
-        let mut db = test_setup_database(&logctx.log).await;
-        let cfg = db::Config { url: db.pg_config().clone() };
-        let pool = db::Pool::new(&cfg);
-        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
-        let opctx =
-            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
-
-        // Create a sled...
-        let sled_id = create_test_sled(&datastore).await;
-
-        // ... and a zpool within that sled...
-        let zpool_id = create_test_zpool(&datastore, sled_id).await;
-
-        // ... and datasets within that zpool.
-        let dataset_count = REGION_REDUNDANCY_THRESHOLD;
-        let bogus_addr =
-            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
-        let dataset_ids: Vec<Uuid> =
-            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
-        for id in &dataset_ids {
-            let dataset =
-                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
-            datastore.dataset_upsert(dataset).await.unwrap();
-        }
-
-        // Allocate regions from the datasets for this disk.
-        //
-        // Note that we ask for a disk which is as large as the zpool,
-        // so we shouldn't have space for redundancy.
-        let disk_size = test_zpool_size();
-        let params = create_test_disk_create_params("disk1", disk_size);
-        let volume1_id = Uuid::new_v4();
-
-        // NOTE: This *should* be an error, rather than succeeding.
-        datastore.region_allocate(&opctx, volume1_id, &params).await.unwrap();
-
-        let _ = db.cleanup().await;
-        logctx.cleanup_successful();
-    }
-
-    // Validate that queries which should be executable without a full table
-    // scan are, in fact, runnable without a FULL SCAN.
- #[tokio::test] - async fn test_queries_do_not_require_full_table_scan() { - use omicron_common::api::external; - let logctx = - dev::test_setup_log("test_queries_do_not_require_full_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new(&cfg); - let datastore = DataStore::new(Arc::new(pool)); - - let explanation = DataStore::get_allocated_regions_query(Uuid::nil()) - .explain_async(datastore.pool()) - .await - .unwrap(); - assert!( - !explanation.contains("FULL SCAN"), - "Found an unexpected FULL SCAN: {}", - explanation - ); - - let explanation = DataStore::get_allocatable_datasets_query() - .explain_async(datastore.pool()) - .await - .unwrap(); - assert!( - !explanation.contains("FULL SCAN"), - "Found an unexpected FULL SCAN: {}", - explanation - ); - - let subnet = db::model::VpcSubnet::new( - Uuid::nil(), - Uuid::nil(), - external::IdentityMetadataCreateParams { - name: external::Name::try_from(String::from("name")).unwrap(), - description: String::from("description"), - }, - external::Ipv4Net("172.30.0.0/22".parse().unwrap()), - external::Ipv6Net("fd00::/64".parse().unwrap()), - ); - let values = FilterConflictingVpcSubnetRangesQuery::new(subnet); - let query = - diesel::insert_into(db::schema::vpc_subnet::dsl::vpc_subnet) - .values(values) - .returning(VpcSubnet::as_returning()); - println!("{}", diesel::debug_query(&query)); - let explanation = query.explain_async(datastore.pool()).await.unwrap(); - assert!( - !explanation.contains("FULL SCAN"), - "Found an unexpected FULL SCAN: {}", - explanation, - ); - - let _ = db.cleanup().await; - logctx.cleanup_successful(); - } - - // Test sled-specific IPv6 address allocation - #[tokio::test] - async fn test_sled_ipv6_address_allocation() { - use omicron_common::address::RSS_RESERVED_ADDRESSES as STATIC_IPV6_ADDRESS_OFFSET; - use std::net::Ipv6Addr; - - let logctx = dev::test_setup_log("test_sled_ipv6_address_allocation"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new(&cfg)); - let datastore = Arc::new(DataStore::new(Arc::clone(&pool))); - let opctx = - OpContext::for_tests(logctx.log.new(o!()), datastore.clone()); - - let rack_id = Uuid::new_v4(); - let addr1 = "[fd00:1de::1]:12345".parse().unwrap(); - let sled1_id = "0de4b299-e0b4-46f0-d528-85de81a7095f".parse().unwrap(); - let sled1 = db::model::Sled::new(sled1_id, addr1, rack_id); - datastore.sled_upsert(sled1).await.unwrap(); - - let addr2 = "[fd00:1df::1]:12345".parse().unwrap(); - let sled2_id = "66285c18-0c79-43e0-e54f-95271f271314".parse().unwrap(); - let sled2 = db::model::Sled::new(sled2_id, addr2, rack_id); - datastore.sled_upsert(sled2).await.unwrap(); - - let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap(); - let expected_ip = Ipv6Addr::new( - 0xfd00, - 0x1de, - 0, - 0, - 0, - 0, - 0, - 2 + STATIC_IPV6_ADDRESS_OFFSET, - ); - assert_eq!(ip, expected_ip); - let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap(); - let expected_ip = Ipv6Addr::new( - 0xfd00, - 0x1de, - 0, - 0, - 0, - 0, - 0, - 3 + STATIC_IPV6_ADDRESS_OFFSET, - ); - assert_eq!(ip, expected_ip); - - let ip = datastore.next_ipv6_address(&opctx, sled2_id).await.unwrap(); - let expected_ip = Ipv6Addr::new( - 0xfd00, - 0x1df, - 0, - 0, - 0, - 0, - 0, - 2 + STATIC_IPV6_ADDRESS_OFFSET, - ); - assert_eq!(ip, expected_ip); - - let _ = db.cleanup().await; - logctx.cleanup_successful(); - } - - 
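Editor's note: the test above pins down the allocator's arithmetic. `next_ipv6_address` bumps the sled's `last_used_address` column in SQL and returns the result, so the first two allocations for a sled land at offsets 2 and 3 past the RSS-reserved block. Below is a simplified in-memory model of that counter semantics; the real query performs a 128-bit increment on an `IpNetwork` column, and the offset value used here is an assumption standing in for `RSS_RESERVED_ADDRESSES`.

```rust
use std::net::Ipv6Addr;

// Assumed value for the sketch; the real constant is
// omicron_common::address::RSS_RESERVED_ADDRESSES.
const STATIC_IPV6_ADDRESS_OFFSET: u16 = 32;

/// Hypothetical model of `UPDATE sled SET last_used_address =
/// last_used_address + 1 RETURNING last_used_address`: increment first,
/// then hand back the freshly-reserved address.
fn next_address(prefix: [u16; 4], last_used: &mut u16) -> Ipv6Addr {
    *last_used += 1;
    Ipv6Addr::new(prefix[0], prefix[1], prefix[2], prefix[3], 0, 0, 0, *last_used)
}

fn main() {
    // The sled row starts with the counter parked at the end of the
    // reserved range, so the first allocation is OFFSET + 2.
    let mut last_used = STATIC_IPV6_ADDRESS_OFFSET + 1;
    let first = next_address([0xfd00, 0x1de, 0, 0], &mut last_used);
    assert_eq!(
        first,
        Ipv6Addr::new(0xfd00, 0x1de, 0, 0, 0, 0, 0, 2 + STATIC_IPV6_ADDRESS_OFFSET)
    );
    let second = next_address([0xfd00, 0x1de, 0, 0], &mut last_used);
    assert_eq!(
        second,
        Ipv6Addr::new(0xfd00, 0x1de, 0, 0, 0, 0, 0, 3 + STATIC_IPV6_ADDRESS_OFFSET)
    );
}
```

Doing the increment in the database rather than in Nexus is what makes concurrent allocations on the same sled safe: each caller observes a distinct returned value.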
#[tokio::test] - async fn test_ssh_keys() { - let logctx = dev::test_setup_log("test_ssh_keys"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // Create a new Silo user so that we can lookup their keys. - let silo_user_id = Uuid::new_v4(); - datastore - .silo_user_create(SiloUser::new( - *SILO_ID, - silo_user_id, - "external@id".into(), - )) - .await - .unwrap(); - - let (.., authz_user) = LookupPath::new(&opctx, &datastore) - .silo_user_id(silo_user_id) - .lookup_for(authz::Action::CreateChild) - .await - .unwrap(); - assert_eq!(authz_user.id(), silo_user_id); - - // Create a new SSH public key for the new user. - let key_name = Name::try_from(String::from("sshkey")).unwrap(); - let public_key = "ssh-test AAAAAAAAKEY".to_string(); - let ssh_key = SshKey::new( - silo_user_id, - params::SshKeyCreate { - identity: IdentityMetadataCreateParams { - name: key_name.clone(), - description: "my SSH public key".to_string(), - }, - public_key, - }, - ); - let created = datastore - .ssh_key_create(&opctx, &authz_user, ssh_key.clone()) - .await - .unwrap(); - assert_eq!(created.silo_user_id, ssh_key.silo_user_id); - assert_eq!(created.public_key, ssh_key.public_key); - - // Lookup the key we just created. - let (authz_silo, authz_silo_user, authz_ssh_key, found) = - LookupPath::new(&opctx, &datastore) - .silo_user_id(silo_user_id) - .ssh_key_name(&key_name.into()) - .fetch() - .await - .unwrap(); - assert_eq!(authz_silo.id(), *SILO_ID); - assert_eq!(authz_silo_user.id(), silo_user_id); - assert_eq!(found.silo_user_id, ssh_key.silo_user_id); - assert_eq!(found.public_key, ssh_key.public_key); - - // Trying to insert the same one again fails. - let duplicate = datastore - .ssh_key_create(&opctx, &authz_user, ssh_key.clone()) - .await; - assert!(matches!( - duplicate, - Err(Error::ObjectAlreadyExists { type_name, object_name }) - if type_name == ResourceType::SshKey - && object_name == "sshkey" - )); - - // Delete the key we just created. - datastore.ssh_key_delete(&opctx, &authz_ssh_key).await.unwrap(); - - // Clean up. - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn test_service_upsert() { - let logctx = dev::test_setup_log("test_service_upsert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // Create a sled on which the service should exist. - let sled_id = create_test_sled(&datastore).await; - - // Create a new service to exist on this sled. - let service_id = Uuid::new_v4(); - let addr = Ipv6Addr::LOCALHOST; - let kind = ServiceKind::Nexus; - - let service = Service::new(service_id, sled_id, addr, kind); - let result = - datastore.service_upsert(&opctx, service.clone()).await.unwrap(); - assert_eq!(service.id(), result.id()); - assert_eq!(service.ip, result.ip); - assert_eq!(service.kind, result.kind); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn test_rack_initialize_is_idempotent() { - let logctx = dev::test_setup_log("test_rack_initialize_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // Create a Rack, insert it into the DB. - let rack = Rack::new(Uuid::new_v4()); - let result = datastore.rack_insert(&opctx, &rack).await.unwrap(); - assert_eq!(result.id(), rack.id()); - assert_eq!(result.initialized, false); - - // Re-insert the Rack (check for idempotency). 
- let result = datastore.rack_insert(&opctx, &rack).await.unwrap(); - assert_eq!(result.id(), rack.id()); - assert_eq!(result.initialized, false); - - // Initialize the Rack. - let result = datastore - .rack_set_initialized(&opctx, rack.id(), vec![]) - .await - .unwrap(); - assert!(result.initialized); - - // Re-initialize the rack (check for idempotency) - let result = datastore - .rack_set_initialized(&opctx, rack.id(), vec![]) - .await - .unwrap(); - assert!(result.initialized); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn test_table_scan() { - let logctx = dev::test_setup_log("test_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - let error = datastore.test_try_table_scan(&opctx).await; - println!("error from attempted table scan: {:#}", error); - match error { - Error::InternalError { internal_message } => { - assert!(internal_message.contains( - "contains a full table/index scan which is \ - explicitly disallowed" - )); - } - error => panic!( - "expected internal error with specific message, found {:?}", - error - ), - } - - // Clean up. - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn test_deallocate_instance_external_ip_is_idempotent() { - use crate::db::schema::instance_external_ip::dsl; - - let logctx = dev::test_setup_log("test_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // Create a record. - let now = Utc::now(); - let ip = InstanceExternalIp { - id: Uuid::new_v4(), - time_created: now, - time_modified: now, - time_deleted: None, - ip_pool_id: Uuid::new_v4(), - ip_pool_range_id: Uuid::new_v4(), - instance_id: Uuid::new_v4(), - ip: ipnetwork::IpNetwork::from(IpAddr::from(Ipv4Addr::new( - 10, 0, 0, 1, - ))), - first_port: crate::db::model::SqlU16(0), - last_port: crate::db::model::SqlU16(10), - }; - diesel::insert_into(dsl::instance_external_ip) - .values(ip) - .execute_async(datastore.pool()) - .await - .unwrap(); - - // Delete it twice, make sure we get the right sentinel return values. - let deleted = datastore - .deallocate_instance_external_ip(&opctx, ip.id) - .await - .unwrap(); - assert!( - deleted, - "Got unexpected sentinel value back when \ - deleting external IP the first time" - ); - let deleted = datastore - .deallocate_instance_external_ip(&opctx, ip.id) - .await - .unwrap(); - assert!( - !deleted, - "Got unexpected sentinel value back when \ - deleting external IP the second time" - ); - - // Deleting a non-existing record fails - assert!(datastore - .deallocate_instance_external_ip(&opctx, Uuid::nil()) - .await - .is_err()); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } -} diff --git a/nexus/src/db/datastore/console_session.rs b/nexus/src/db/datastore/console_session.rs new file mode 100644 index 00000000000..07743adabac --- /dev/null +++ b/nexus/src/db/datastore/console_session.rs @@ -0,0 +1,147 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods related to [`ConsoleSession`]s. 
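Editor's note: here begins the first of the new per-resource modules. The refactor leans on Rust permitting multiple inherent `impl` blocks for one type across modules of the same crate, so every existing `datastore.foo()` call site keeps working unchanged. A minimal, self-contained sketch of the mechanism (toy method bodies, not the real signatures):

```rust
// One type, many impl blocks: each submodule contributes methods to the
// same `DataStore`, mirroring how this diff splits datastore.rs into
// datastore/console_session.rs, datastore/disk.rs, and so on.
pub struct DataStore;

mod console_session {
    impl super::DataStore {
        pub fn session_create(&self) {
            println!("defined in the console_session module");
        }
    }
}

mod disk {
    impl super::DataStore {
        pub fn project_create_disk(&self) {
            println!("defined in the disk module");
        }
    }
}

fn main() {
    // Callers see one flat API regardless of which file defines a method.
    let ds = DataStore;
    ds.session_create();
    ds.project_create_disk();
}
```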
+ +use super::DataStore; +use crate::authn; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::lookup::LookupPath; +use crate::db::model::ConsoleSession; +use crate::db::model::IdentityType; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::Utc; +use diesel::prelude::*; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::InternalContext; +use omicron_common::api::external::UpdateResult; + +impl DataStore { + // TODO-correctness: fix session method errors. the map_errs turn all errors + // into 500s, most notably (and most frequently) session not found. they + // don't end up as 500 in the http response because they get turned into a + // 4xx error by calling code, the session cookie authn scheme. this is + // necessary for now in order to avoid the possibility of leaking out a + // too-friendly 404 to the client. once datastore has its own error type and + // the conversion to serializable user-facing errors happens elsewhere (see + // issue #347) these methods can safely return more accurate errors, and + // showing/hiding that info as appropriate will be handled higher up + // TODO-correctness this may apply at the Nexus level as well. + + pub async fn session_create( + &self, + opctx: &OpContext, + session: ConsoleSession, + ) -> CreateResult { + opctx + .authorize(authz::Action::CreateChild, &authz::CONSOLE_SESSION_LIST) + .await?; + + use db::schema::console_session::dsl; + + diesel::insert_into(dsl::console_session) + .values(session) + .returning(ConsoleSession::as_returning()) + .get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + Error::internal_error(&format!( + "error creating session: {:?}", + e + )) + }) + } + + pub async fn session_update_last_used( + &self, + opctx: &OpContext, + authz_session: &authz::ConsoleSession, + ) -> UpdateResult { + opctx.authorize(authz::Action::Modify, authz_session).await?; + + use db::schema::console_session::dsl; + let console_session = diesel::update(dsl::console_session) + .filter(dsl::token.eq(authz_session.id())) + .set((dsl::time_last_used.eq(Utc::now()),)) + .returning(ConsoleSession::as_returning()) + .get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + Error::internal_error(&format!( + "error renewing session: {:?}", + e + )) + })?; + + let (.., db_silo_user) = LookupPath::new(opctx, &self) + .silo_user_id(console_session.silo_user_id) + .fetch() + .await + .map_err(|e| { + Error::internal_error(&format!( + "error fetching silo id: {:?}", + e + )) + })?; + + Ok(authn::ConsoleSessionWithSiloId { + console_session, + silo_id: db_silo_user.silo_id, + }) + } + + // putting "hard" in the name because we don't do this with any other model + pub async fn session_hard_delete( + &self, + opctx: &OpContext, + authz_session: &authz::ConsoleSession, + ) -> DeleteResult { + // We don't do a typical authz check here. Instead, knowing that every + // user is allowed to delete their own session, the query below filters + // on the session's silo_user_id matching the current actor's id. + // + // We could instead model this more like other authz checks. That would + // involve fetching the session record from the database, storing the + // associated silo_user_id into the `authz::ConsoleSession`, and having + // an Oso rule saying you can delete a session whose associated silo + // user matches the authenticated actor. 
This would be a fair bit more
+        // complicated and more work at runtime than what we're doing here.
+        // The tradeoff is that we're effectively encoding policy here, but it
+        // seems worth it in this case.
+        let actor = opctx
+            .authn
+            .actor_required()
+            .internal_context("deleting current user's session")?;
+
+        // This check shouldn't be required in that there should be no overlap
+        // between silo user ids and other types of identity ids. But it's easy
+        // to check, and if we add another type of Actor, we'll be forced here
+        // to consider if they should be able to have console sessions and log
+        // out of them.
+        let silo_user_id = match actor.actor_type() {
+            IdentityType::SiloUser => actor.actor_id(),
+            IdentityType::UserBuiltin => {
+                return Err(Error::invalid_request("not a Silo user"))
+            }
+        };
+
+        use db::schema::console_session::dsl;
+        diesel::delete(dsl::console_session)
+            .filter(dsl::silo_user_id.eq(silo_user_id))
+            .filter(dsl::token.eq(authz_session.id()))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map(|_rows_deleted| ())
+            .map_err(|e| {
+                Error::internal_error(&format!(
+                    "error deleting session: {:?}",
+                    e
+                ))
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/dataset.rs b/nexus/src/db/datastore/dataset.rs
new file mode 100644
index 00000000000..0023cbb972e
--- /dev/null
+++ b/nexus/src/db/datastore/dataset.rs
@@ -0,0 +1,87 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Dataset`]s.
+
+use super::DataStore;
+use super::RunnableQuery;
+use super::REGION_REDUNDANCY_THRESHOLD;
+use crate::db;
+use crate::db::collection_insert::AsyncInsertError;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Asset;
+use crate::db::model::Dataset;
+use crate::db::model::DatasetKind;
+use crate::db::model::Zpool;
+use chrono::Utc;
+use diesel::prelude::*;
+use diesel::upsert::excluded;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+
+impl DataStore {
+    /// Stores a new dataset in the database.
+    pub async fn dataset_upsert(
+        &self,
+        dataset: Dataset,
+    ) -> CreateResult<Dataset> {
+        use db::schema::dataset::dsl;
+
+        let zpool_id = dataset.pool_id;
+        Zpool::insert_resource(
+            zpool_id,
+            diesel::insert_into(dsl::dataset)
+                .values(dataset.clone())
+                .on_conflict(dsl::id)
+                .do_update()
+                .set((
+                    dsl::time_modified.eq(Utc::now()),
+                    dsl::pool_id.eq(excluded(dsl::pool_id)),
+                    dsl::ip.eq(excluded(dsl::ip)),
+                    dsl::port.eq(excluded(dsl::port)),
+                    dsl::kind.eq(excluded(dsl::kind)),
+                )),
+        )
+        .insert_and_get_result_async(self.pool())
+        .await
+        .map_err(|e| match e {
+            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
+                type_name: ResourceType::Zpool,
+                lookup_type: LookupType::ById(zpool_id),
+            },
+            AsyncInsertError::DatabaseError(e) => {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Dataset,
+                        &dataset.id().to_string(),
+                    ),
+                )
+            }
+        })
+    }
+
+    pub(super) fn get_allocatable_datasets_query() -> impl RunnableQuery<Dataset>
+    {
+        use db::schema::dataset::dsl;
+
+        dsl::dataset
+            // We look for valid datasets (non-deleted crucible datasets).
+            .filter(dsl::size_used.is_not_null())
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::kind.eq(DatasetKind::Crucible))
+            .order(dsl::size_used.asc())
+            // TODO: We admittedly don't actually *fail* any request for
+            // running out of space - we try to send the request down to
+            // crucible agents, and expect them to fail on our behalf in
+            // out-of-storage conditions. This should undoubtedly be
+            // handled more explicitly.
+            .select(Dataset::as_select())
+            .limit(REGION_REDUNDANCY_THRESHOLD.try_into().unwrap())
+    }
+}
diff --git a/nexus/src/db/datastore/device_auth.rs b/nexus/src/db/datastore/device_auth.rs
new file mode 100644
index 00000000000..c9e221fd92a
--- /dev/null
+++ b/nexus/src/db/datastore/device_auth.rs
@@ -0,0 +1,142 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to OAuth 2.0 Device Authorization Grants.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::error::TransactionError;
+use crate::db::model::DeviceAccessToken;
+use crate::db::model::DeviceAuthRequest;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::LookupResult;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Start a device authorization grant flow by recording the request
+    /// and initial response parameters.
+    pub async fn device_auth_request_create(
+        &self,
+        opctx: &OpContext,
+        auth_request: DeviceAuthRequest,
+    ) -> CreateResult<DeviceAuthRequest> {
+        opctx
+            .authorize(
+                authz::Action::CreateChild,
+                &authz::DEVICE_AUTH_REQUEST_LIST,
+            )
+            .await?;
+
+        use db::schema::device_auth_request::dsl;
+        diesel::insert_into(dsl::device_auth_request)
+            .values(auth_request)
+            .returning(DeviceAuthRequest::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Remove the device authorization request and create a new device
+    /// access token record. The token may already be expired if the flow
+    /// was not completed in time.
+    pub async fn device_access_token_create(
+        &self,
+        opctx: &OpContext,
+        authz_request: &authz::DeviceAuthRequest,
+        authz_user: &authz::SiloUser,
+        access_token: DeviceAccessToken,
+    ) -> CreateResult<DeviceAccessToken> {
+        assert_eq!(authz_user.id(), access_token.silo_user_id);
+        opctx.authorize(authz::Action::Delete, authz_request).await?;
+        opctx.authorize(authz::Action::CreateChild, authz_user).await?;
+
+        use db::schema::device_auth_request::dsl as request_dsl;
+        let delete_request = diesel::delete(request_dsl::device_auth_request)
+            .filter(request_dsl::user_code.eq(authz_request.id()));
+
+        use db::schema::device_access_token::dsl as token_dsl;
+        let insert_token = diesel::insert_into(token_dsl::device_access_token)
+            .values(access_token)
+            .returning(DeviceAccessToken::as_returning());
+
+        #[derive(Debug)]
+        enum TokenGrantError {
+            RequestNotFound,
+            TooManyRequests,
+        }
+        type TxnError = TransactionError<TokenGrantError>;
+
+        self.pool_authorized(opctx)
+            .await?
+            .transaction(move |conn| match delete_request.execute(conn)? {
+                0 => {
+                    Err(TxnError::CustomError(TokenGrantError::RequestNotFound))
+                }
+                1 => Ok(insert_token.get_result(conn)?),
+                _ => Err(TxnError::CustomError(
+                    TokenGrantError::TooManyRequests,
+                )),
+            })
+            .await
+            .map_err(|e| match e {
+                TxnError::CustomError(TokenGrantError::RequestNotFound) => {
+                    Error::ObjectNotFound {
+                        type_name: ResourceType::DeviceAuthRequest,
+                        lookup_type: LookupType::ByCompositeId(
+                            authz_request.id(),
+                        ),
+                    }
+                }
+                TxnError::CustomError(TokenGrantError::TooManyRequests) => {
+                    Error::internal_error("unexpectedly found multiple device auth requests for the same user code")
+                }
+                TxnError::Pool(e) => {
+                    public_error_from_diesel_pool(e, ErrorHandler::Server)
+                }
+            })
+    }
+
+    /// Look up a granted device access token.
+    /// Note: since this lookup is not by a primary key or name,
+    /// (though it does use a unique index), it does not fit the
+    /// usual lookup machinery pattern. It therefore does not include
+    /// any authz checks. However, the device code is a single-use
+    /// high-entropy random token, and so should not be guessable
+    /// by an attacker.
+    pub async fn device_access_token_fetch(
+        &self,
+        opctx: &OpContext,
+        client_id: Uuid,
+        device_code: String,
+    ) -> LookupResult<DeviceAccessToken> {
+        use db::schema::device_access_token::dsl;
+        dsl::device_access_token
+            .filter(dsl::client_id.eq(client_id))
+            .filter(dsl::device_code.eq(device_code))
+            .select(DeviceAccessToken::as_select())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::DeviceAccessToken,
+                        LookupType::ByCompositeId(
+                            "client_id, device_code".to_string(),
+                        ),
+                    ),
+                )
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/disk.rs b/nexus/src/db/datastore/disk.rs
new file mode 100644
index 00000000000..3b7a421d00e
--- /dev/null
+++ b/nexus/src/db/datastore/disk.rs
@@ -0,0 +1,540 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Disk`]s.
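Editor's note: the attach/detach methods further down in this file funnel every "zero rows updated" outcome through a disambiguation match: either the operation already happened (idempotent success) or the state combination explains the refusal. A minimal, self-contained sketch of that decision table, using stand-in enums and a `u32` instance id rather than omicron's real model types (all names here are illustrative, not the actual API):

```rust
// Toy state enums modeling the subset of states the attach path inspects.
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Debug)]
enum DiskState {
    Creating,
    Detached,
    Attaching(u32), // u32 stands in for the target instance's UUID
    Attached(u32),
    Detaching(u32),
}

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Debug)]
enum InstanceState {
    Creating,
    Stopped,
    Running,
}

// Mirrors the AttachError::NoUpdate handling: an attach that changed no
// rows is either already done for this instance, or refused for a reason
// we can name from the (disk, instance) state pair.
fn classify_attach(
    disk: DiskState,
    instance: InstanceState,
    target: u32,
) -> Result<&'static str, String> {
    match disk {
        DiskState::Attached(id) | DiskState::Attaching(id) if id == target => {
            Ok("already attached to this instance: idempotent success")
        }
        DiskState::Creating | DiskState::Detached => match instance {
            InstanceState::Creating | InstanceState::Stopped => {
                // Both sides look attachable; the real code checks the
                // attached_count against max_disks here before giving up.
                Err("cannot attach disk".to_string())
            }
            other => Err(format!("cannot attach disk to instance in {:?} state", other)),
        },
        _ => Err("disk is attached to another instance".to_string()),
    }
}

fn main() {
    assert!(classify_attach(DiskState::Attached(7), InstanceState::Stopped, 7).is_ok());
    assert!(classify_attach(DiskState::Attached(3), InstanceState::Stopped, 7).is_err());
    assert!(classify_attach(DiskState::Detached, InstanceState::Running, 7).is_err());
}
```

The real implementation additionally distinguishes attachment-capacity exhaustion via the `attached_count` returned with the `NoUpdate` error; that branch is collapsed into a single error string here for brevity.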
+
+use super::DataStore;
+use crate::authz;
+use crate::authz::ApiResource;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::collection_attach::AttachError;
+use crate::db::collection_attach::DatastoreAttachTarget;
+use crate::db::collection_detach::DatastoreDetachTarget;
+use crate::db::collection_detach::DetachError;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Resource;
+use crate::db::lookup::LookupPath;
+use crate::db::model::Disk;
+use crate::db::model::DiskRuntimeState;
+use crate::db::model::Instance;
+use crate::db::model::Name;
+use crate::db::pagination::paginated;
+use crate::db::update_and_check::UpdateAndCheck;
+use crate::db::update_and_check::UpdateStatus;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::LookupResult;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use omicron_common::bail_unless;
+use uuid::Uuid;
+
+impl DataStore {
+    /// List disks associated with a given instance.
+    pub async fn instance_list_disks(
+        &self,
+        opctx: &OpContext,
+        authz_instance: &authz::Instance,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<Disk> {
+        use db::schema::disk::dsl;
+
+        opctx.authorize(authz::Action::ListChildren, authz_instance).await?;
+
+        paginated(dsl::disk, dsl::name, &pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::attach_instance_id.eq(authz_instance.id()))
+            .select(Disk::as_select())
+            .load_async::<Disk>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn project_create_disk(&self, disk: Disk) -> CreateResult<Disk> {
+        use db::schema::disk::dsl;
+
+        let gen = disk.runtime().gen;
+        let name = disk.name().clone();
+        let disk: Disk = diesel::insert_into(dsl::disk)
+            .values(disk)
+            .on_conflict(dsl::id)
+            .do_nothing()
+            .returning(Disk::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::Disk, name.as_str()),
+                )
+            })?;
+
+        let runtime = disk.runtime();
+        bail_unless!(
+            runtime.state().state() == &api::external::DiskState::Creating,
+            "newly-created Disk has unexpected state: {:?}",
+            runtime.disk_state
+        );
+        bail_unless!(
+            runtime.gen == gen,
+            "newly-created Disk has unexpected generation: {:?}",
+            runtime.gen
+        );
+        Ok(disk)
+    }
+
+    pub async fn project_list_disks(
+        &self,
+        opctx: &OpContext,
+        authz_project: &authz::Project,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<Disk> {
+        opctx.authorize(authz::Action::ListChildren, authz_project).await?;
+
+        use db::schema::disk::dsl;
+        paginated(dsl::disk, dsl::name, &pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::project_id.eq(authz_project.id()))
+            .select(Disk::as_select())
+            .load_async::<Disk>(self.pool_authorized(opctx).await?)
+ .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + /// Attaches a disk to an instance, if both objects: + /// - Exist + /// - Are in valid states + /// - Are under the maximum "attach count" threshold + pub async fn instance_attach_disk( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + authz_disk: &authz::Disk, + max_disks: u32, + ) -> Result<(Instance, Disk), Error> { + use db::schema::{disk, instance}; + + opctx.authorize(authz::Action::Modify, authz_instance).await?; + opctx.authorize(authz::Action::Modify, authz_disk).await?; + + let ok_to_attach_disk_states = vec![ + api::external::DiskState::Creating, + api::external::DiskState::Detached, + ]; + let ok_to_attach_disk_state_labels: Vec<_> = + ok_to_attach_disk_states.iter().map(|s| s.label()).collect(); + + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // This list of instance attach states is more restrictive than it + // plausibly could be. + // + // We currently only permit attaching disks to stopped instances. + let ok_to_attach_instance_states = vec![ + db::model::InstanceState(api::external::InstanceState::Creating), + db::model::InstanceState(api::external::InstanceState::Stopped), + ]; + + let attached_label = + api::external::DiskState::Attached(authz_instance.id()).label(); + + let (instance, disk) = Instance::attach_resource( + authz_instance.id(), + authz_disk.id(), + instance::table + .into_boxed() + .filter(instance::dsl::state.eq_any(ok_to_attach_instance_states)), + disk::table + .into_boxed() + .filter(disk::dsl::disk_state.eq_any(ok_to_attach_disk_state_labels)), + max_disks, + diesel::update(disk::dsl::disk) + .set(( + disk::dsl::disk_state.eq(attached_label), + disk::dsl::attach_instance_id.eq(authz_instance.id()) + )) + ) + .attach_and_get_result_async(self.pool_authorized(opctx).await?) + .await + .or_else(|e| { + match e { + AttachError::CollectionNotFound => { + Err(Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + )) + }, + AttachError::ResourceNotFound => { + Err(Error::not_found_by_id( + ResourceType::Disk, + &authz_disk.id(), + )) + }, + AttachError::NoUpdate { attached_count, resource, collection } => { + let disk_state = resource.state().into(); + match disk_state { + // Idempotent errors: We did not perform an update, + // because we're already in the process of attaching. + api::external::DiskState::Attached(id) if id == authz_instance.id() => { + return Ok((collection, resource)); + } + api::external::DiskState::Attaching(id) if id == authz_instance.id() => { + return Ok((collection, resource)); + } + // Ok-to-attach disk states: Inspect the state to infer + // why we did not attach. + api::external::DiskState::Creating | + api::external::DiskState::Detached => { + match collection.runtime_state.state.state() { + // Ok-to-be-attached instance states: + api::external::InstanceState::Creating | + api::external::InstanceState::Stopped => { + // The disk is ready to be attached, and the + // instance is ready to be attached. Perhaps + // we are at attachment capacity? + if attached_count == i64::from(max_disks) { + return Err(Error::invalid_request(&format!( + "cannot attach more than {} disks to instance", + max_disks + ))); + } + + // We can't attach, but the error hasn't + // helped us infer why. 
+ return Err(Error::internal_error( + "cannot attach disk" + )); + } + // Not okay-to-be-attached instance states: + _ => { + Err(Error::invalid_request(&format!( + "cannot attach disk to instance in {} state", + collection.runtime_state.state.state(), + ))) + } + } + }, + // Not-okay-to-attach disk states: The disk is attached elsewhere. + api::external::DiskState::Attached(_) | + api::external::DiskState::Attaching(_) | + api::external::DiskState::Detaching(_) => { + Err(Error::invalid_request(&format!( + "cannot attach disk \"{}\": disk is attached to another instance", + resource.name().as_str(), + ))) + } + _ => { + Err(Error::invalid_request(&format!( + "cannot attach disk \"{}\": invalid state {}", + resource.name().as_str(), + disk_state, + ))) + } + } + }, + AttachError::DatabaseError(e) => { + Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + }, + } + })?; + + Ok((instance, disk)) + } + + pub async fn instance_detach_disk( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + authz_disk: &authz::Disk, + ) -> Result { + use db::schema::{disk, instance}; + + opctx.authorize(authz::Action::Modify, authz_instance).await?; + opctx.authorize(authz::Action::Modify, authz_disk).await?; + + let ok_to_detach_disk_states = + vec![api::external::DiskState::Attached(authz_instance.id())]; + let ok_to_detach_disk_state_labels: Vec<_> = + ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); + + // TODO(https://github.com/oxidecomputer/omicron/issues/811): + // This list of instance detach states is more restrictive than it + // plausibly could be. + // + // We currently only permit detaching disks from stopped instances. + let ok_to_detach_instance_states = vec![ + db::model::InstanceState(api::external::InstanceState::Creating), + db::model::InstanceState(api::external::InstanceState::Stopped), + ]; + + let detached_label = api::external::DiskState::Detached.label(); + + let disk = Instance::detach_resource( + authz_instance.id(), + authz_disk.id(), + instance::table + .into_boxed() + .filter(instance::dsl::state.eq_any(ok_to_detach_instance_states)), + disk::table + .into_boxed() + .filter(disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels)), + diesel::update(disk::dsl::disk) + .set(( + disk::dsl::disk_state.eq(detached_label), + disk::dsl::attach_instance_id.eq(Option::::None) + )) + ) + .detach_and_get_result_async(self.pool_authorized(opctx).await?) + .await + .or_else(|e| { + match e { + DetachError::CollectionNotFound => { + Err(Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + )) + }, + DetachError::ResourceNotFound => { + Err(Error::not_found_by_id( + ResourceType::Disk, + &authz_disk.id(), + )) + }, + DetachError::NoUpdate { resource, collection } => { + let disk_state = resource.state().into(); + match disk_state { + // Idempotent errors: We did not perform an update, + // because we're already in the process of detaching. + api::external::DiskState::Detached => { + return Ok(resource); + } + api::external::DiskState::Detaching(id) if id == authz_instance.id() => { + return Ok(resource); + } + // Ok-to-detach disk states: Inspect the state to infer + // why we did not detach. + api::external::DiskState::Attached(id) if id == authz_instance.id() => { + match collection.runtime_state.state.state() { + // Ok-to-be-detached instance states: + api::external::InstanceState::Creating | + api::external::InstanceState::Stopped => { + // We can't detach, but the error hasn't + // helped us infer why. 
+ return Err(Error::internal_error( + "cannot detach disk" + )); + } + // Not okay-to-be-detached instance states: + _ => { + Err(Error::invalid_request(&format!( + "cannot detach disk from instance in {} state", + collection.runtime_state.state.state(), + ))) + } + } + }, + api::external::DiskState::Attaching(id) if id == authz_instance.id() => { + Err(Error::invalid_request(&format!( + "cannot detach disk \"{}\": disk is currently being attached", + resource.name().as_str(), + ))) + }, + // Not-okay-to-detach disk states: The disk is attached elsewhere. + api::external::DiskState::Attached(_) | + api::external::DiskState::Attaching(_) | + api::external::DiskState::Detaching(_) => { + Err(Error::invalid_request(&format!( + "cannot detach disk \"{}\": disk is attached to another instance", + resource.name().as_str(), + ))) + } + _ => { + Err(Error::invalid_request(&format!( + "cannot detach disk \"{}\": invalid state {}", + resource.name().as_str(), + disk_state, + ))) + } + } + }, + DetachError::DatabaseError(e) => { + Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + }, + } + })?; + + Ok(disk) + } + + pub async fn disk_update_runtime( + &self, + opctx: &OpContext, + authz_disk: &authz::Disk, + new_runtime: &DiskRuntimeState, + ) -> Result { + // TODO-security This permission might be overloaded here. The way disk + // runtime updates work is that the caller in Nexus first updates the + // Sled Agent to make a change, then updates to the database to reflect + // that change. So by the time we get here, we better have already done + // an authz check, or we will have already made some unauthorized change + // to the system! At the same time, we don't want just anybody to be + // able to modify the database state. So we _do_ still want an authz + // check here. Arguably it's for a different kind of action, but it + // doesn't seem that useful to split it out right now. + opctx.authorize(authz::Action::Modify, authz_disk).await?; + + let disk_id = authz_disk.id(); + use db::schema::disk::dsl; + let updated = diesel::update(dsl::disk) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(disk_id)) + .filter(dsl::state_generation.lt(new_runtime.gen)) + .set(new_runtime.clone()) + .check_if_exists::(disk_id) + .execute_and_check(self.pool()) + .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByResource(authz_disk), + ) + })?; + + Ok(updated) + } + + /// Fetches information about a Disk that the caller has previously fetched + /// + /// The only difference between this function and a new fetch by id is that + /// this function preserves the `authz_disk` that you started with -- which + /// keeps track of how you looked it up. So if you looked it up by name, + /// the authz you get back will reflect that, whereas if you did a fresh + /// lookup by id, it wouldn't. + /// TODO-cleanup this could be provided by the Lookup API for any resource + pub async fn disk_refetch( + &self, + opctx: &OpContext, + authz_disk: &authz::Disk, + ) -> LookupResult { + let (.., db_disk) = LookupPath::new(opctx, self) + .disk_id(authz_disk.id()) + .fetch() + .await + .map_err(|e| match e { + // Use the "not found" message of the authz object we were + // given, which will reflect however the caller originally + // looked it up. + Error::ObjectNotFound { .. 
} => authz_disk.not_found(),
+                e => e,
+            })?;
+        Ok(db_disk)
+    }
+
+    /// Updates a disk record to indicate it has been deleted.
+    ///
+    /// Returns the volume ID associated with the deleted disk.
+    ///
+    /// Does not attempt to modify any resources (e.g. regions) which may
+    /// belong to the disk.
+    // TODO: Delete me (this function, not the disk!), ensure all datastore
+    // access is auth-checked.
+    //
+    // Here's the deal: We have auth checks on access to the database - at the
+    // time of writing this comment, only a subset of access is protected, and
+    // "Delete Disk" is actually one of the first targets of this auth check.
+    //
+    // However, there are contexts where we want to delete disks *outside* of
+    // calling the HTTP API-layer "delete disk" endpoint. As one example, during
+    // the "undo" part of the disk creation saga, we want to allow users to
+    // delete the disk they (partially) created.
+    //
+    // This gets a little tricky mapping back to user permissions - a user
+    // SHOULD be able to create a disk with the "create" permission, without the
+    // "delete" permission. To still make the call internally, we'd basically
+    // need to manufacture a token that identifies the ability to "create a
+    // disk, or delete a very specific disk with ID = ...".
+    pub async fn project_delete_disk_no_auth(
+        &self,
+        disk_id: &Uuid,
+    ) -> Result<Uuid, Error> {
+        use db::schema::disk::dsl;
+        let pool = self.pool();
+        let now = Utc::now();
+
+        let ok_to_delete_states = vec![
+            api::external::DiskState::Detached,
+            api::external::DiskState::Faulted,
+            api::external::DiskState::Creating,
+        ];
+
+        let ok_to_delete_state_labels: Vec<_> =
+            ok_to_delete_states.iter().map(|s| s.label()).collect();
+        let destroyed = api::external::DiskState::Destroyed.label();
+
+        let result = diesel::update(dsl::disk)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(*disk_id))
+            .filter(dsl::disk_state.eq_any(ok_to_delete_state_labels))
+            .filter(dsl::attach_instance_id.is_null())
+            .set((dsl::disk_state.eq(destroyed), dsl::time_deleted.eq(now)))
+            .check_if_exists::<Disk>(*disk_id)
+            .execute_and_check(pool)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::Disk,
+                        LookupType::ById(*disk_id),
+                    ),
+                )
+            })?;
+
+        match result.status {
+            UpdateStatus::Updated => Ok(result.found.volume_id),
+            UpdateStatus::NotUpdatedButExists => {
+                let disk = result.found;
+                let disk_state = disk.state();
+                if disk.time_deleted().is_some()
+                    && disk_state.state()
+                        == &api::external::DiskState::Destroyed
+                {
+                    // To maintain idempotency, if the disk has already been
+                    // destroyed, don't throw an error.
+                    return Ok(disk.volume_id);
+                } else if !ok_to_delete_states.contains(disk_state.state()) {
+                    return Err(Error::InvalidRequest {
+                        message: format!(
+                            "disk cannot be deleted in state \"{}\"",
+                            disk.runtime_state.disk_state
+                        ),
+                    });
+                } else if disk_state.is_attached() {
+                    return Err(Error::InvalidRequest {
+                        message: String::from("disk is attached"),
+                    });
+                } else {
+                    // NOTE: This is a "catch-all" error case, more specific
+                    // errors should be preferred as they're more actionable.
+ return Err(Error::InternalError { + internal_message: String::from( + "disk exists, but cannot be deleted", + ), + }); + } + } + } + } +} diff --git a/nexus/src/db/datastore/global_image.rs b/nexus/src/db/datastore/global_image.rs new file mode 100644 index 00000000000..8d353de632c --- /dev/null +++ b/nexus/src/db/datastore/global_image.rs @@ -0,0 +1,68 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods related to [`GlobalImage`]s. + +use super::DataStore; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::ErrorHandler; +use crate::db::identity::Resource; +use crate::db::model::GlobalImage; +use crate::db::model::Name; +use crate::db::pagination::paginated; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::ResourceType; + +impl DataStore { + pub async fn global_image_list_images( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Name>, + ) -> ListResultVec { + opctx + .authorize(authz::Action::ListChildren, &authz::GLOBAL_IMAGE_LIST) + .await?; + + use db::schema::global_image::dsl; + paginated(dsl::global_image, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .select(GlobalImage::as_select()) + .load_async::(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + pub async fn global_image_create_image( + &self, + opctx: &OpContext, + image: GlobalImage, + ) -> CreateResult { + opctx + .authorize(authz::Action::CreateChild, &authz::GLOBAL_IMAGE_LIST) + .await?; + + use db::schema::global_image::dsl; + let name = image.name().clone(); + diesel::insert_into(dsl::global_image) + .values(image) + .on_conflict(dsl::id) + .do_nothing() + .returning(GlobalImage::as_returning()) + .get_result_async(self.pool()) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict(ResourceType::Image, name.as_str()), + ) + }) + } +} diff --git a/nexus/src/db/datastore/identity_provider.rs b/nexus/src/db/datastore/identity_provider.rs new file mode 100644 index 00000000000..fabd0af463b --- /dev/null +++ b/nexus/src/db/datastore/identity_provider.rs @@ -0,0 +1,95 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods related to [`IdentityProvider`]s. 
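The image-listing method above takes a `DataPageParams` cursor keyed by name. A hedged sketch of a first-page call follows; the field names are assumptions based on how `paginated()` is driven elsewhere in this change, and `opctx`/`datastore` setup is elided:

```rust
// Hypothetical caller sketch: fetch the first page of global images by name.
use omicron_common::api::external::DataPageParams;
use std::num::NonZeroU32;

let pagparams = DataPageParams {
    marker: None, // no cursor yet: start at the first name
    direction: dropshot::PaginationOrder::Ascending,
    limit: NonZeroU32::new(32).unwrap(),
};
let first_page =
    datastore.global_image_list_images(&opctx, &pagparams).await?;
```

Subsequent pages would pass the last name returned as the `marker`, which is what lets the query stay on the `name` index instead of scanning.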
+ +use super::DataStore; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::ErrorHandler; +use crate::db::identity::Resource; +use crate::db::model::IdentityProvider; +use crate::db::model::Name; +use crate::db::pagination::paginated; +use async_bb8_diesel::AsyncConnection; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::ResourceType; + +impl DataStore { + pub async fn identity_provider_list( + &self, + opctx: &OpContext, + authz_silo: &authz::Silo, + pagparams: &DataPageParams<'_, Name>, + ) -> ListResultVec { + opctx + .authorize(authz::Action::ListIdentityProviders, authz_silo) + .await?; + + use db::schema::identity_provider::dsl; + paginated(dsl::identity_provider, dsl::name, pagparams) + .filter(dsl::silo_id.eq(authz_silo.id())) + .filter(dsl::time_deleted.is_null()) + .select(IdentityProvider::as_select()) + .load_async::(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + pub async fn saml_identity_provider_create( + &self, + opctx: &OpContext, + authz_silo: &authz::Silo, + provider: db::model::SamlIdentityProvider, + ) -> CreateResult { + opctx.authorize(authz::Action::CreateChild, authz_silo).await?; + + let name = provider.identity().name.to_string(); + self.pool_authorized(opctx) + .await? + .transaction(move |conn| { + // insert silo identity provider record with type Saml + use db::schema::identity_provider::dsl as idp_dsl; + diesel::insert_into(idp_dsl::identity_provider) + .values(db::model::IdentityProvider { + identity: db::model::IdentityProviderIdentity { + id: provider.identity.id, + name: provider.identity.name.clone(), + description: provider.identity.description.clone(), + time_created: provider.identity.time_created, + time_modified: provider.identity.time_modified, + time_deleted: provider.identity.time_deleted, + }, + silo_id: provider.silo_id, + provider_type: db::model::IdentityProviderType::Saml, + }) + .execute(conn)?; + + // insert silo saml identity provider record + use db::schema::saml_identity_provider::dsl; + let result = diesel::insert_into(dsl::saml_identity_provider) + .values(provider) + .returning(db::model::SamlIdentityProvider::as_returning()) + .get_result(conn)?; + + Ok(result) + }) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict( + ResourceType::SamlIdentityProvider, + &name, + ), + ) + }) + } +} diff --git a/nexus/src/db/datastore/instance.rs b/nexus/src/db/datastore/instance.rs new file mode 100644 index 00000000000..27d330eb50a --- /dev/null +++ b/nexus/src/db/datastore/instance.rs @@ -0,0 +1,255 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods on [`Instance`]s. 
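The SAML create above inserts both the generic `identity_provider` row and the `saml_identity_provider` row in one transaction, so the type-tagged parent record can never exist without its SAML details. On the read side, callers first resolve an authz object through the lookup machinery. A hedged caller sketch; the `silo_id()` lookup is an assumption mirroring the `disk_id()` and `instance_id()` lookups used elsewhere in this change, and `pagparams` is elided:

```rust
// Hypothetical caller sketch: resolve the silo, then list its identity
// providers under the ListIdentityProviders action checked above.
let (.., authz_silo) = LookupPath::new(&opctx, &datastore)
    .silo_id(silo_id)
    .lookup_for(authz::Action::ListIdentityProviders)
    .await?;
let providers = datastore
    .identity_provider_list(&opctx, &authz_silo, &pagparams)
    .await?;
```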
+ +use super::DataStore; +use crate::authz; +use crate::authz::ApiResource; +use crate::context::OpContext; +use crate::db; +use crate::db::collection_detach_many::DatastoreDetachManyTarget; +use crate::db::collection_detach_many::DetachManyError; +use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::ErrorHandler; +use crate::db::identity::Resource; +use crate::db::lookup::LookupPath; +use crate::db::model::Instance; +use crate::db::model::InstanceRuntimeState; +use crate::db::model::Name; +use crate::db::pagination::paginated; +use crate::db::update_and_check::UpdateAndCheck; +use crate::db::update_and_check::UpdateStatus; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::Utc; +use diesel::prelude::*; +use omicron_common::api; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; +use omicron_common::bail_unless; +use uuid::Uuid; + +impl DataStore { + /// Idempotently insert a database record for an Instance + /// + /// This is intended to be used by a saga action. When we say this is + /// idempotent, we mean that if this function succeeds and the caller + /// invokes it again with the same instance id, project id, creation + /// parameters, and initial runtime, then this operation will succeed and + /// return the current object in the database. Because this is intended for + /// use by sagas, we do assume that if the record exists, it should still be + /// in the "Creating" state. If it's in any other state, this function will + /// return with an error on the assumption that we don't really know what's + /// happened or how to proceed. + /// + /// ## Errors + /// + /// In addition to the usual database errors (e.g., no connections + /// available), this function can fail if there is already a different + /// instance (having a different id) with the same name in the same project. + // TODO-design Given that this is really oriented towards the saga + // interface, one wonders if it's even worth having an abstraction here, or + // if sagas shouldn't directly work with the database here (i.e., just do + // what this function does under the hood). 
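The contract in the doc comment above can be read as a saga-replay property of `project_create_instance`, defined just below. A sketch grounded in that comment rather than in a test from this change; `instance` construction is elided:

```rust
// Sketch of the stated idempotency contract: replaying the same saga node
// repeats the call with an identical record and should yield the row that
// already exists (assuming it is still in the "Creating" state).
let created = datastore.project_create_instance(instance.clone()).await?;
let replayed = datastore.project_create_instance(instance).await?;
assert_eq!(created.id(), replayed.id());
```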
+ pub async fn project_create_instance( + &self, + instance: Instance, + ) -> CreateResult { + use db::schema::instance::dsl; + + let gen = instance.runtime().gen; + let name = instance.name().clone(); + let instance: Instance = diesel::insert_into(dsl::instance) + .values(instance) + .on_conflict(dsl::id) + .do_nothing() + .returning(Instance::as_returning()) + .get_result_async(self.pool()) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::Conflict( + ResourceType::Instance, + name.as_str(), + ), + ) + })?; + + bail_unless!( + instance.runtime().state.state() + == &api::external::InstanceState::Creating, + "newly-created Instance has unexpected state: {:?}", + instance.runtime().state + ); + bail_unless!( + instance.runtime().gen == gen, + "newly-created Instance has unexpected generation: {:?}", + instance.runtime().gen + ); + Ok(instance) + } + + pub async fn project_list_instances( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + pagparams: &DataPageParams<'_, Name>, + ) -> ListResultVec { + opctx.authorize(authz::Action::ListChildren, authz_project).await?; + + use db::schema::instance::dsl; + paginated(dsl::instance, dsl::name, &pagparams) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(authz_project.id())) + .select(Instance::as_select()) + .load_async::(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + /// Fetches information about an Instance that the caller has previously + /// fetched + /// + /// See disk_refetch(). + pub async fn instance_refetch( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + ) -> LookupResult { + let (.., db_instance) = LookupPath::new(opctx, self) + .instance_id(authz_instance.id()) + .fetch() + .await + .map_err(|e| match e { + // Use the "not found" message of the authz object we were + // given, which will reflect however the caller originally + // looked it up. + Error::ObjectNotFound { .. } => authz_instance.not_found(), + e => e, + })?; + Ok(db_instance) + } + + // TODO-design It's tempting to return the updated state of the Instance + // here because it's convenient for consumers and by using a RETURNING + // clause, we could ensure that the "update" and "fetch" are atomic. + // But in the unusual case that we _don't_ update the row because our + // update is older than the one in the database, we would have to fetch + // the current state explicitly. For now, we'll just require consumers + // to explicitly fetch the state if they want that. 
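The comment above describes the optimistic-concurrency contract of `instance_update_runtime`, defined just below: the update only lands if the incoming state's generation is strictly newer, so a stale report from a sled-agent is dropped and the caller learns about it from the returned `bool`. A hedged usage sketch, with `instance_id` and `new_runtime` elided:

```rust
// Sketch: generation-gated compare-and-swap on the instance runtime state.
let updated = datastore
    .instance_update_runtime(&instance_id, &new_runtime)
    .await?;
if !updated {
    // The row exists but our generation was not newer; as the comment above
    // notes, a caller that needs the current state must fetch it explicitly.
}
```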
+ pub async fn instance_update_runtime( + &self, + instance_id: &Uuid, + new_runtime: &InstanceRuntimeState, + ) -> Result { + use db::schema::instance::dsl; + + let updated = diesel::update(dsl::instance) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*instance_id)) + .filter(dsl::state_generation.lt(new_runtime.gen)) + .filter( + dsl::migration_id + .is_null() + .or(dsl::target_propolis_id.eq(new_runtime.propolis_id)), + ) + .set(new_runtime.clone()) + .check_if_exists::(*instance_id) + .execute_and_check(self.pool()) + .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Instance, + LookupType::ById(*instance_id), + ), + ) + })?; + + Ok(updated) + } + + pub async fn project_delete_instance( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + ) -> DeleteResult { + opctx.authorize(authz::Action::Delete, authz_instance).await?; + + // This is subject to change, but for now we're going to say that an + // instance must be "stopped" or "failed" in order to delete it. The + // delete operation sets "time_deleted" (just like with other objects) + // and also sets the state to "destroyed". + use api::external::InstanceState as ApiInstanceState; + use db::model::InstanceState as DbInstanceState; + use db::schema::{disk, instance}; + + let stopped = DbInstanceState::new(ApiInstanceState::Stopped); + let failed = DbInstanceState::new(ApiInstanceState::Failed); + let destroyed = DbInstanceState::new(ApiInstanceState::Destroyed); + let ok_to_delete_instance_states = vec![stopped, failed]; + + let detached_label = api::external::DiskState::Detached.label(); + let ok_to_detach_disk_states = + vec![api::external::DiskState::Attached(authz_instance.id())]; + let ok_to_detach_disk_state_labels: Vec<_> = + ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); + + let _instance = Instance::detach_resources( + authz_instance.id(), + instance::table.into_boxed().filter( + instance::dsl::state.eq_any(ok_to_delete_instance_states), + ), + disk::table.into_boxed().filter( + disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels), + ), + diesel::update(instance::dsl::instance).set(( + instance::dsl::state.eq(destroyed), + instance::dsl::time_deleted.eq(Utc::now()), + )), + diesel::update(disk::dsl::disk).set(( + disk::dsl::disk_state.eq(detached_label), + disk::dsl::attach_instance_id.eq(Option::::None), + )), + ) + .detach_and_get_result_async(self.pool_authorized(opctx).await?) 
+ .await + .map_err(|e| match e { + DetachManyError::CollectionNotFound => Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + ), + DetachManyError::NoUpdate { collection } => { + let instance_state = collection.runtime_state.state.state(); + match instance_state { + api::external::InstanceState::Stopped + | api::external::InstanceState::Failed => { + Error::internal_error("cannot delete instance") + } + _ => Error::invalid_request(&format!( + "instance cannot be deleted in state \"{}\"", + instance_state, + )), + } + } + DetachManyError::DatabaseError(e) => { + public_error_from_diesel_pool(e, ErrorHandler::Server) + } + })?; + + Ok(()) + } +} diff --git a/nexus/src/db/datastore/instance_external_ip.rs b/nexus/src/db/datastore/instance_external_ip.rs new file mode 100644 index 00000000000..96b9d319f0d --- /dev/null +++ b/nexus/src/db/datastore/instance_external_ip.rs @@ -0,0 +1,119 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods on [`InstanceExternalIp`]s. + +use super::DataStore; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::ErrorHandler; +use crate::db::model::IncompleteInstanceExternalIp; +use crate::db::model::InstanceExternalIp; +use crate::db::queries::external_ip::NextExternalIp; +use crate::db::update_and_check::UpdateAndCheck; +use crate::db::update_and_check::UpdateStatus; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::Utc; +use diesel::prelude::*; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::LookupResult; +use uuid::Uuid; + +impl DataStore { + /// Create an external IP address for an instance. + // TODO-correctness: This should be made idempotent. + pub async fn allocate_instance_external_ip( + &self, + opctx: &OpContext, + instance_id: Uuid, + ) -> CreateResult { + let query = + NextExternalIp::new(IncompleteInstanceExternalIp::new(instance_id)); + query + .get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + use async_bb8_diesel::ConnectionError::Query; + use async_bb8_diesel::PoolError::Connection; + use diesel::result::Error::NotFound; + match e { + Connection(Query(NotFound)) => Error::invalid_request( + "No external IP addresses available for new instance", + ), + _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + } + }) + } + + /// Deallocate the external IP address with the provided ID. + /// + /// To support idempotency, such as in saga operations, this method returns + /// an extra boolean, rather than the usual `DeleteResult`. The meaning of + /// return values are: + /// + /// - `Ok(true)`: The record was deleted during this call + /// - `Ok(false)`: The record was already deleted, such as by a previous + /// call + /// - `Err(_)`: Any other condition, including a non-existent record. + pub async fn deallocate_instance_external_ip( + &self, + opctx: &OpContext, + ip_id: Uuid, + ) -> Result { + use db::schema::instance_external_ip::dsl; + let now = Utc::now(); + diesel::update(dsl::instance_external_ip) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(ip_id)) + .set(dsl::time_deleted.eq(now)) + .check_if_exists::(ip_id) + .execute_and_check(self.pool_authorized(opctx).await?) 
+ .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } + + /// Delete all external IP addresses associated with the provided instance + /// ID. + // TODO-correctness: This should be made idempotent. + pub async fn deallocate_instance_external_ip_by_instance_id( + &self, + opctx: &OpContext, + instance_id: Uuid, + ) -> DeleteResult { + use db::schema::instance_external_ip::dsl; + let now = Utc::now(); + diesel::update(dsl::instance_external_ip) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::instance_id.eq(instance_id)) + .set(dsl::time_deleted.eq(now)) + .execute_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool(e, ErrorHandler::Server) + })?; + Ok(()) + } + + pub async fn instance_lookup_external_ip( + &self, + opctx: &OpContext, + instance_id: Uuid, + ) -> LookupResult { + use db::schema::instance_external_ip::dsl; + dsl::instance_external_ip + .filter(dsl::instance_id.eq(instance_id)) + .filter(dsl::time_deleted.is_null()) + .select(InstanceExternalIp::as_select()) + .get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + } +} diff --git a/nexus/src/db/datastore/ip_pool.rs b/nexus/src/db/datastore/ip_pool.rs new file mode 100644 index 00000000000..e2b6503ae8d --- /dev/null +++ b/nexus/src/db/datastore/ip_pool.rs @@ -0,0 +1,336 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods on [`IpPool`]s. + +use super::DataStore; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::collection_insert::AsyncInsertError; +use crate::db::collection_insert::DatastoreCollection; +use crate::db::error::diesel_pool_result_optional; +use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::ErrorHandler; +use crate::db::identity::Resource; +use crate::db::model::IpPool; +use crate::db::model::IpPoolRange; +use crate::db::model::IpPoolUpdate; +use crate::db::model::Name; +use crate::db::pagination::paginated; +use crate::db::queries::ip_pool::FilterOverlappingIpRanges; +use crate::external_api::params; +use crate::external_api::shared::IpRange; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::Utc; +use diesel::prelude::*; +use ipnetwork::IpNetwork; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; +use omicron_common::api::external::UpdateResult; +use uuid::Uuid; + +impl DataStore { + /// List IP Pools by their name + pub async fn ip_pools_list_by_name( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Name>, + ) -> ListResultVec { + use db::schema::ip_pool::dsl; + opctx + .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) + .await?; + paginated(dsl::ip_pool, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .select(db::model::IpPool::as_select()) + .get_results_async(self.pool_authorized(opctx).await?) 
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// List IP Pools by their IDs
+    pub async fn ip_pools_list_by_id(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<IpPool> {
+        use db::schema::ip_pool::dsl;
+        opctx
+            .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST)
+            .await?;
+        paginated(dsl::ip_pool, dsl::id, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .select(db::model::IpPool::as_select())
+            .get_results_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn ip_pool_create(
+        &self,
+        opctx: &OpContext,
+        new_pool: &params::IpPoolCreate,
+    ) -> CreateResult<IpPool> {
+        use db::schema::ip_pool::dsl;
+        opctx
+            .authorize(authz::Action::CreateChild, &authz::IP_POOL_LIST)
+            .await?;
+        let pool = IpPool::new(&new_pool.identity);
+        let pool_name = pool.name().as_str().to_string();
+        diesel::insert_into(dsl::ip_pool)
+            .values(pool)
+            .returning(IpPool::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::IpPool, &pool_name),
+                )
+            })
+    }
+
+    pub async fn ip_pool_delete(
+        &self,
+        opctx: &OpContext,
+        authz_pool: &authz::IpPool,
+        db_pool: &IpPool,
+    ) -> DeleteResult {
+        use db::schema::ip_pool::dsl;
+        use db::schema::ip_pool_range;
+        opctx.authorize(authz::Action::Delete, authz_pool).await?;
+
+        // Verify there are no IP ranges still in this pool
+        let range = diesel_pool_result_optional(
+            ip_pool_range::dsl::ip_pool_range
+                .filter(ip_pool_range::dsl::ip_pool_id.eq(authz_pool.id()))
+                .filter(ip_pool_range::dsl::time_deleted.is_null())
+                .select(ip_pool_range::dsl::id)
+                .limit(1)
+                .first_async::<Uuid>(self.pool_authorized(opctx).await?)
+                .await,
+        )
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
+        if range.is_some() {
+            return Err(Error::InvalidRequest {
+                message:
+                    "IP Pool cannot be deleted while it contains IP ranges"
+                        .to_string(),
+            });
+        }
+
+        // Delete the pool, conditional on the rcgen not having changed. This
+        // protects the delete from occurring if clients created a new IP range
+        // in between the above check for children and this query.
+        let now = Utc::now();
+        let updated_rows = diesel::update(dsl::ip_pool)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_pool.id()))
+            .filter(dsl::rcgen.eq(db_pool.rcgen))
+            .set(dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_pool),
+                )
+            })?;
+
+        if updated_rows == 0 {
+            return Err(Error::InvalidRequest {
+                message: "deletion failed due to concurrent modification"
+                    .to_string(),
+            });
+        }
+        Ok(())
+    }
+
+    pub async fn ip_pool_update(
+        &self,
+        opctx: &OpContext,
+        authz_pool: &authz::IpPool,
+        updates: IpPoolUpdate,
+    ) -> UpdateResult<IpPool> {
+        use db::schema::ip_pool::dsl;
+        opctx.authorize(authz::Action::Modify, authz_pool).await?;
+        diesel::update(dsl::ip_pool)
+            .filter(dsl::id.eq(authz_pool.id()))
+            .filter(dsl::time_deleted.is_null())
+            .set(updates)
+            .returning(IpPool::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+ .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByResource(authz_pool), + ) + }) + } + + pub async fn ip_pool_list_ranges( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + pag_params: &DataPageParams<'_, IpNetwork>, + ) -> ListResultVec { + use db::schema::ip_pool_range::dsl; + opctx.authorize(authz::Action::ListChildren, authz_pool).await?; + paginated(dsl::ip_pool_range, dsl::first_address, pag_params) + .filter(dsl::ip_pool_id.eq(authz_pool.id())) + .filter(dsl::time_deleted.is_null()) + .select(IpPoolRange::as_select()) + .get_results_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel_pool( + e, + ErrorHandler::NotFoundByResource(authz_pool), + ) + }) + } + + pub async fn ip_pool_add_range( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + range: &IpRange, + ) -> CreateResult { + use db::schema::ip_pool_range::dsl; + opctx.authorize(authz::Action::CreateChild, authz_pool).await?; + let pool_id = authz_pool.id(); + let new_range = IpPoolRange::new(range, pool_id); + let filter_subquery = FilterOverlappingIpRanges { range: new_range }; + let insert_query = + diesel::insert_into(dsl::ip_pool_range).values(filter_subquery); + IpPool::insert_resource(pool_id, insert_query) + .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .await + .map_err(|e| { + use async_bb8_diesel::ConnectionError::Query; + use async_bb8_diesel::PoolError::Connection; + use diesel::result::Error::NotFound; + + match e { + AsyncInsertError::DatabaseError(Connection(Query( + NotFound, + ))) => { + // We've filtered out the IP addresses the client provided, + // i.e., there's some overlap with existing addresses. + Error::invalid_request( + format!( + "The provided IP range {}-{} overlaps with \ + an existing range", + range.first_address(), + range.last_address(), + ) + .as_str(), + ) + } + AsyncInsertError::CollectionNotFound => { + Error::ObjectNotFound { + type_name: ResourceType::IpPool, + lookup_type: LookupType::ById(pool_id), + } + } + AsyncInsertError::DatabaseError(err) => { + public_error_from_diesel_pool(err, ErrorHandler::Server) + } + } + }) + } + + pub async fn ip_pool_delete_range( + &self, + opctx: &OpContext, + authz_pool: &authz::IpPool, + range: &IpRange, + ) -> DeleteResult { + use db::schema::instance_external_ip; + use db::schema::ip_pool_range::dsl; + opctx.authorize(authz::Action::Modify, authz_pool).await?; + + let pool_id = authz_pool.id(); + let first_address = range.first_address(); + let last_address = range.last_address(); + let first_net = ipnetwork::IpNetwork::from(first_address); + let last_net = ipnetwork::IpNetwork::from(last_address); + + // Fetch the range itself, if it exists. We'll need to protect against + // concurrent inserts of new external IPs from the target range by + // comparing the rcgen. + let range = diesel_pool_result_optional( + dsl::ip_pool_range + .filter(dsl::ip_pool_id.eq(pool_id)) + .filter(dsl::first_address.eq(first_net)) + .filter(dsl::last_address.eq(last_net)) + .filter(dsl::time_deleted.is_null()) + .select(IpPoolRange::as_select()) + .get_result_async::( + self.pool_authorized(opctx).await?, + ) + .await, + ) + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .ok_or_else(|| { + Error::invalid_request( + format!( + "The provided range {}-{} does not exist", + first_address, last_address, + ) + .as_str(), + ) + })?; + + // Find external IPs allocated out of this pool and range. 
+        let range_id = range.id;
+        let has_children = diesel::dsl::select(diesel::dsl::exists(
+            instance_external_ip::table
+                .filter(instance_external_ip::dsl::ip_pool_id.eq(pool_id))
+                .filter(
+                    instance_external_ip::dsl::ip_pool_range_id.eq(range_id),
+                )
+                .filter(instance_external_ip::dsl::time_deleted.is_null()),
+        ))
+        .get_result_async::<bool>(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
+        if has_children {
+            return Err(Error::invalid_request(
+                "IP pool ranges cannot be deleted while \
+                external IP addresses are allocated from them",
+            ));
+        }
+
+        // Delete the range, conditional on the rcgen not having changed. This
+        // protects the delete from occurring if clients allocated a new
+        // external IP address in between the above check for children and
+        // this query.
+        let rcgen = range.rcgen;
+        let now = Utc::now();
+        let updated_rows = diesel::update(
+            dsl::ip_pool_range
+                .find(range_id)
+                .filter(dsl::time_deleted.is_null())
+                .filter(dsl::rcgen.eq(rcgen)),
+        )
+        .set(dsl::time_deleted.eq(now))
+        .execute_async(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
+        if updated_rows == 1 {
+            Ok(())
+        } else {
+            Err(Error::invalid_request(
+                "IP range deletion failed due to concurrent modification",
+            ))
+        }
+    }
+}
diff --git a/nexus/src/db/datastore/mod.rs b/nexus/src/db/datastore/mod.rs
new file mode 100644
index 00000000000..2d11e01a759
--- /dev/null
+++ b/nexus/src/db/datastore/mod.rs
@@ -0,0 +1,1061 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Primary control plane interface for database read and write operations
+
+// TODO-scalability review all queries for use of indexes (may need
+// "time_deleted IS NOT NULL" conditions) Figure out how to automate this.
+//
+// TODO-design Better support for joins?
+// The interfaces here often require that to do anything with an object, a
+// caller must first look up the id and then do operations with the id. For
+// example, the caller of project_list_disks() always looks up the project to
+// get the project_id, then lists disks having that project_id. It's possible
+// to implement this instead with a JOIN in the database so that we do it with
+// one database round-trip. We could use CTEs similar to what we do with
+// conditional updates to distinguish the case where the project didn't exist
+// vs. there were no disks in it. This seems likely to be a fair bit more
+// complicated to do safely and generally compared to what we have now.
+
+use super::pool::DbConnection;
+use super::Pool;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db::{
+    self,
+    error::{public_error_from_diesel_pool, ErrorHandler},
+};
+use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager};
+use diesel::pg::Pg;
+use diesel::prelude::*;
+use diesel::query_builder::{QueryFragment, QueryId};
+use diesel::query_dsl::methods::LoadQuery;
+use diesel::{ExpressionMethods, QueryDsl};
+use omicron_common::api::external::Error;
+use omicron_common::api::external::IdentityMetadataCreateParams;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use std::net::Ipv6Addr;
+use std::sync::Arc;
+use uuid::Uuid;
+
+mod console_session;
+mod dataset;
+mod device_auth;
+mod disk;
+mod global_image;
+mod identity_provider;
+mod instance;
+mod instance_external_ip;
+mod ip_pool;
+mod network_interface;
+mod organization;
+mod oximeter;
+mod project;
+mod rack;
+mod region;
+mod role;
+mod saga;
+mod service;
+mod silo;
+mod silo_user;
+mod sled;
+mod ssh_key;
+mod update;
+mod volume;
+mod vpc;
+mod zpool;
+
+// Number of unique datasets required to back a region.
+// TODO: This should likely turn into a configuration option.
+const REGION_REDUNDANCY_THRESHOLD: usize = 3;
+
+// Represents a query that is ready to be executed.
+//
+// This helper trait lets the statement either be executed or explained.
+//
+// U: The output type of executing the statement.
+trait RunnableQuery<U>:
+    RunQueryDsl<DbConnection>
+    + QueryFragment<Pg>
+    + LoadQuery<'static, DbConnection, U>
+    + QueryId
+{
+}
+
+impl<T, U> RunnableQuery<U> for T where
+    T: RunQueryDsl<DbConnection>
+        + QueryFragment<Pg>
+        + LoadQuery<'static, DbConnection, U>
+        + QueryId
+{
+}
+
+pub struct DataStore {
+    pool: Arc<Pool>,
+}
+
+// The majority of `DataStore`'s methods live in our submodules as a concession
+// to compilation times; changing a query only requires incremental
+// recompilation of that query's module instead of all queries on `DataStore`.
+impl DataStore {
+    pub fn new(pool: Arc<Pool>) -> Self {
+        DataStore { pool }
+    }
+
+    // TODO-security This should be deprecated in favor of pool_authorized(),
+    // which gives us the chance to do a minimal security check before hitting
+    // the database. Eventually, this function should only be used for doing
+    // authentication in the first place (since we can't do an authz check in
+    // that case).
+    fn pool(&self) -> &bb8::Pool<ConnectionManager<DbConnection>> {
+        self.pool.pool()
+    }
+
+    pub(super) async fn pool_authorized(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<&bb8::Pool<ConnectionManager<DbConnection>>, Error> {
+        opctx.authorize(authz::Action::Query, &authz::DATABASE).await?;
+        Ok(self.pool.pool())
+    }
+
+    /// Return the next available IPv6 address for an Oxide service running on
+    /// the provided sled.
+    pub async fn next_ipv6_address(
+        &self,
+        opctx: &OpContext,
+        sled_id: Uuid,
+    ) -> Result<Ipv6Addr, Error> {
+        use db::schema::sled::dsl;
+        let net = diesel::update(
+            dsl::sled.find(sled_id).filter(dsl::time_deleted.is_null()),
+        )
+        .set(dsl::last_used_address.eq(dsl::last_used_address + 1))
+        .returning(dsl::last_used_address)
+        .get_result_async(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| {
+            public_error_from_diesel_pool(
+                e,
+                ErrorHandler::NotFoundByLookup(
+                    ResourceType::Sled,
+                    LookupType::ById(sled_id),
+                ),
+            )
+        })?;
+
+        // TODO-correctness: We need to ensure that this address is actually
+        // within the sled's underlay prefix, once that's included in the
+        // database record.
+ match net { + ipnetwork::IpNetwork::V6(net) => Ok(net.ip()), + _ => Err(Error::InternalError { + internal_message: String::from("Sled IP address must be IPv6"), + }), + } + } + + // Test interfaces + + #[cfg(test)] + async fn test_try_table_scan(&self, opctx: &OpContext) -> Error { + use db::schema::project::dsl; + let conn = self.pool_authorized(opctx).await; + if let Err(error) = conn { + return error; + } + let result = dsl::project + .select(diesel::dsl::count_star()) + .first_async::(conn.unwrap()) + .await; + match result { + Ok(_) => Error::internal_error("table scan unexpectedly succeeded"), + Err(error) => { + public_error_from_diesel_pool(error, ErrorHandler::Server) + } + } + } +} + +/// Constructs a DataStore for use in test suites that has preloaded the +/// built-in users, roles, and role assignments that are needed for basic +/// operation +#[cfg(test)] +pub async fn datastore_test( + logctx: &dropshot::test_util::LogContext, + db: &omicron_test_utils::dev::db::CockroachInstance, +) -> (OpContext, Arc) { + use crate::authn; + + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(db::Pool::new(&cfg)); + let datastore = Arc::new(DataStore::new(pool)); + + // Create an OpContext with the credentials of "db-init" just for the + // purpose of loading the built-in users, roles, and assignments. + let opctx = OpContext::for_background( + logctx.log.new(o!()), + Arc::new(authz::Authz::new(&logctx.log)), + authn::Context::internal_db_init(), + Arc::clone(&datastore), + ); + datastore.load_builtin_users(&opctx).await.unwrap(); + datastore.load_builtin_roles(&opctx).await.unwrap(); + datastore.load_builtin_role_asgns(&opctx).await.unwrap(); + datastore.load_builtin_silos(&opctx).await.unwrap(); + datastore.load_silo_users(&opctx).await.unwrap(); + datastore.load_silo_user_role_assignments(&opctx).await.unwrap(); + + // Create an OpContext with the credentials of "test-privileged" for general + // testing. 
+ let opctx = + OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); + + (opctx, datastore) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::authn; + use crate::authz; + use crate::db::explain::ExplainableAsync; + use crate::db::fixed_data::silo::SILO_ID; + use crate::db::identity::Asset; + use crate::db::identity::Resource; + use crate::db::lookup::LookupPath; + use crate::db::model::Dataset; + use crate::db::model::InstanceExternalIp; + use crate::db::model::Rack; + use crate::db::model::Region; + use crate::db::model::Service; + use crate::db::model::SiloUser; + use crate::db::model::Sled; + use crate::db::model::SshKey; + use crate::db::model::VpcSubnet; + use crate::db::model::Zpool; + use crate::db::model::{ConsoleSession, DatasetKind, Project, ServiceKind}; + use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; + use crate::external_api::params; + use chrono::{Duration, Utc}; + use nexus_test_utils::db::test_setup_database; + use omicron_common::api::external::{ + ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, + }; + use omicron_test_utils::dev; + use std::collections::HashSet; + use std::net::Ipv6Addr; + use std::net::SocketAddrV6; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + use uuid::Uuid; + + #[tokio::test] + async fn test_project_creation() { + let logctx = dev::test_setup_log("test_project_creation"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let organization = params::OrganizationCreate { + identity: IdentityMetadataCreateParams { + name: "org".parse().unwrap(), + description: "desc".to_string(), + }, + }; + + let organization = + datastore.organization_create(&opctx, &organization).await.unwrap(); + + let project = Project::new( + organization.id(), + params::ProjectCreate { + identity: IdentityMetadataCreateParams { + name: "project".parse().unwrap(), + description: "desc".to_string(), + }, + }, + ); + let (.., authz_org) = LookupPath::new(&opctx, &datastore) + .organization_id(organization.id()) + .lookup_for(authz::Action::CreateChild) + .await + .unwrap(); + datastore.project_create(&opctx, &authz_org, project).await.unwrap(); + + let (.., organization_after_project_create) = + LookupPath::new(&opctx, &datastore) + .organization_name(organization.name()) + .fetch() + .await + .unwrap(); + assert!(organization_after_project_create.rcgen > organization.rcgen); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_session_methods() { + let logctx = dev::test_setup_log("test_session_methods"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + let authn_opctx = OpContext::for_background( + logctx.log.new(o!("component" => "TestExternalAuthn")), + Arc::new(authz::Authz::new(&logctx.log)), + authn::Context::external_authn(), + Arc::clone(&datastore), + ); + + let token = "a_token".to_string(); + let silo_user_id = Uuid::new_v4(); + + let session = ConsoleSession { + token: token.clone(), + time_created: Utc::now() - Duration::minutes(5), + time_last_used: Utc::now() - Duration::minutes(5), + silo_user_id, + }; + + let _ = datastore + .session_create(&authn_opctx, session.clone()) + .await + .unwrap(); + + // Associate silo with user + datastore + .silo_user_create(SiloUser::new( + *SILO_ID, + silo_user_id, + "external_id".into(), + )) + .await + .unwrap(); + + let (.., db_silo_user) = 
LookupPath::new(&opctx, &datastore) + .silo_user_id(session.silo_user_id) + .fetch() + .await + .unwrap(); + assert_eq!(*SILO_ID, db_silo_user.silo_id); + + // fetch the one we just created + let (.., fetched) = LookupPath::new(&opctx, &datastore) + .console_session_token(&token) + .fetch() + .await + .unwrap(); + assert_eq!(session.silo_user_id, fetched.silo_user_id); + + // trying to insert the same one again fails + let duplicate = + datastore.session_create(&authn_opctx, session.clone()).await; + assert!(matches!( + duplicate, + Err(Error::InternalError { internal_message: _ }) + )); + + // update last used (i.e., renew token) + let authz_session = authz::ConsoleSession::new( + authz::FLEET, + token.clone(), + LookupType::ByCompositeId(token.clone()), + ); + let renewed = datastore + .session_update_last_used(&opctx, &authz_session) + .await + .unwrap(); + assert!( + renewed.console_session.time_last_used > session.time_last_used + ); + + // time_last_used change persists in DB + let (.., fetched) = LookupPath::new(&opctx, &datastore) + .console_session_token(&token) + .fetch() + .await + .unwrap(); + assert!(fetched.time_last_used > session.time_last_used); + + // deleting it using `opctx` (which represents the test-privileged user) + // should succeed but not do anything -- you can't delete someone else's + // session + let delete = + datastore.session_hard_delete(&opctx, &authz_session).await; + assert_eq!(delete, Ok(())); + let fetched = LookupPath::new(&opctx, &datastore) + .console_session_token(&token) + .fetch() + .await; + assert!(fetched.is_ok()); + + // delete it and fetch should come back with nothing + let silo_user_opctx = OpContext::for_background( + logctx.log.new(o!()), + Arc::new(authz::Authz::new(&logctx.log)), + authn::Context::test_silo_user(*SILO_ID, silo_user_id), + Arc::clone(&datastore), + ); + let delete = datastore + .session_hard_delete(&silo_user_opctx, &authz_session) + .await; + assert_eq!(delete, Ok(())); + let fetched = LookupPath::new(&opctx, &datastore) + .console_session_token(&token) + .fetch() + .await; + assert!(matches!( + fetched, + Err(Error::ObjectNotFound { type_name: _, lookup_type: _ }) + )); + + // deleting an already nonexistent is considered a success + let delete_again = + datastore.session_hard_delete(&opctx, &authz_session).await; + assert_eq!(delete_again, Ok(())); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + // Creates a test sled, returns its UUID. + async fn create_test_sled(datastore: &DataStore) -> Uuid { + let bogus_addr = SocketAddrV6::new( + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1), + 8080, + 0, + 0, + ); + let rack_id = Uuid::new_v4(); + let sled_id = Uuid::new_v4(); + let sled = Sled::new(sled_id, bogus_addr.clone(), rack_id); + datastore.sled_upsert(sled).await.unwrap(); + sled_id + } + + fn test_zpool_size() -> ByteCount { + ByteCount::from_gibibytes_u32(100) + } + + // Creates a test zpool, returns its UUID. 
+ async fn create_test_zpool(datastore: &DataStore, sled_id: Uuid) -> Uuid { + let zpool_id = Uuid::new_v4(); + let zpool = Zpool::new( + zpool_id, + sled_id, + &crate::internal_api::params::ZpoolPutRequest { + size: test_zpool_size(), + }, + ); + datastore.zpool_upsert(zpool).await.unwrap(); + zpool_id + } + + fn create_test_disk_create_params( + name: &str, + size: ByteCount, + ) -> params::DiskCreate { + params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: Name::try_from(name.to_string()).unwrap(), + description: name.to_string(), + }, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(4096).unwrap(), + }, + size, + } + } + + #[tokio::test] + async fn test_region_allocation() { + let logctx = dev::test_setup_log("test_region_allocation"); + let mut db = test_setup_database(&logctx.log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = db::Pool::new(&cfg); + let datastore = Arc::new(DataStore::new(Arc::new(pool))); + let opctx = + OpContext::for_tests(logctx.log.new(o!()), datastore.clone()); + + // Create a sled... + let sled_id = create_test_sled(&datastore).await; + + // ... and a zpool within that sled... + let zpool_id = create_test_zpool(&datastore, sled_id).await; + + // ... and datasets within that zpool. + let dataset_count = REGION_REDUNDANCY_THRESHOLD * 2; + let bogus_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let dataset_ids: Vec = + (0..dataset_count).map(|_| Uuid::new_v4()).collect(); + for id in &dataset_ids { + let dataset = + Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible); + datastore.dataset_upsert(dataset).await.unwrap(); + } + + // Allocate regions from the datasets for this disk. + let params = create_test_disk_create_params( + "disk1", + ByteCount::from_mebibytes_u32(500), + ); + let volume1_id = Uuid::new_v4(); + // Currently, we only allocate one Region Set per volume. + let expected_region_count = REGION_REDUNDANCY_THRESHOLD; + let dataset_and_regions = datastore + .region_allocate(&opctx, volume1_id, ¶ms) + .await + .unwrap(); + + // Verify the allocation. + assert_eq!(expected_region_count, dataset_and_regions.len()); + let mut disk1_datasets = HashSet::new(); + for (dataset, region) in dataset_and_regions { + assert!(disk1_datasets.insert(dataset.id())); + assert_eq!(volume1_id, region.volume_id()); + assert_eq!(ByteCount::from(4096), region.block_size()); + assert_eq!(params.extent_size() / 4096, region.blocks_per_extent()); + assert_eq!(params.extent_count(), region.extent_count()); + } + + // Allocate regions for a second disk. Observe that we allocate from + // the three previously unused datasets. + let params = create_test_disk_create_params( + "disk2", + ByteCount::from_mebibytes_u32(500), + ); + let volume2_id = Uuid::new_v4(); + let dataset_and_regions = datastore + .region_allocate(&opctx, volume2_id, ¶ms) + .await + .unwrap(); + assert_eq!(expected_region_count, dataset_and_regions.len()); + let mut disk2_datasets = HashSet::new(); + for (dataset, region) in dataset_and_regions { + assert!(disk2_datasets.insert(dataset.id())); + assert_eq!(volume2_id, region.volume_id()); + assert_eq!(ByteCount::from(4096), region.block_size()); + assert_eq!(params.extent_size() / 4096, region.blocks_per_extent()); + assert_eq!(params.extent_count(), region.extent_count()); + } + + // Double-check that the datasets used for the first disk weren't + // used when allocating the second disk. 
+        assert_eq!(0, disk1_datasets.intersection(&disk2_datasets).count());
+
+        let _ = db.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_region_allocation_is_idempotent() {
+        let logctx =
+            dev::test_setup_log("test_region_allocation_is_idempotent");
+        let mut db = test_setup_database(&logctx.log).await;
+        let cfg = db::Config { url: db.pg_config().clone() };
+        let pool = db::Pool::new(&cfg);
+        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
+        let opctx =
+            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
+
+        // Create a sled...
+        let sled_id = create_test_sled(&datastore).await;
+
+        // ... and a zpool within that sled...
+        let zpool_id = create_test_zpool(&datastore, sled_id).await;
+
+        // ... and datasets within that zpool.
+        let dataset_count = REGION_REDUNDANCY_THRESHOLD;
+        let bogus_addr =
+            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+        let dataset_ids: Vec<Uuid> =
+            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
+        for id in &dataset_ids {
+            let dataset =
+                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
+            datastore.dataset_upsert(dataset).await.unwrap();
+        }
+
+        // Allocate regions from the datasets for this volume.
+        let params = create_test_disk_create_params(
+            "disk",
+            ByteCount::from_mebibytes_u32(500),
+        );
+        let volume_id = Uuid::new_v4();
+        let mut dataset_and_regions1 = datastore
+            .region_allocate(&opctx, volume_id, &params)
+            .await
+            .unwrap();
+        let mut dataset_and_regions2 = datastore
+            .region_allocate(&opctx, volume_id, &params)
+            .await
+            .unwrap();
+
+        // Give them a consistent order so we can easily compare them.
+        let sort_vec = |v: &mut Vec<(Dataset, Region)>| {
+            v.sort_by(|(d1, r1), (d2, r2)| {
+                let order = d1.id().cmp(&d2.id());
+                match order {
+                    std::cmp::Ordering::Equal => r1.id().cmp(&r2.id()),
+                    _ => order,
+                }
+            });
+        };
+        sort_vec(&mut dataset_and_regions1);
+        sort_vec(&mut dataset_and_regions2);
+
+        // Validate that the two calls to allocate return the same data.
+        assert_eq!(dataset_and_regions1.len(), dataset_and_regions2.len());
+        for i in 0..dataset_and_regions1.len() {
+            assert_eq!(dataset_and_regions1[i], dataset_and_regions2[i]);
+        }
+
+        let _ = db.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_region_allocation_not_enough_datasets() {
+        let logctx =
+            dev::test_setup_log("test_region_allocation_not_enough_datasets");
+        let mut db = test_setup_database(&logctx.log).await;
+        let cfg = db::Config { url: db.pg_config().clone() };
+        let pool = db::Pool::new(&cfg);
+        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
+        let opctx =
+            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
+
+        // Create a sled...
+        let sled_id = create_test_sled(&datastore).await;
+
+        // ... and a zpool within that sled...
+        let zpool_id = create_test_zpool(&datastore, sled_id).await;
+
+        // ... and datasets within that zpool.
+        let dataset_count = REGION_REDUNDANCY_THRESHOLD - 1;
+        let bogus_addr =
+            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+        let dataset_ids: Vec<Uuid> =
+            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
+        for id in &dataset_ids {
+            let dataset =
+                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
+            datastore.dataset_upsert(dataset).await.unwrap();
+        }
+
+        // Allocate regions from the datasets for this volume.
+        let params = create_test_disk_create_params(
+            "disk1",
+            ByteCount::from_mebibytes_u32(500),
+        );
+        let volume1_id = Uuid::new_v4();
+        let err = datastore
+            .region_allocate(&opctx, volume1_id, &params)
+            .await
+            .unwrap_err();
+        assert!(err
+            .to_string()
+            .contains("Not enough datasets to allocate disks"));
+
+        assert!(matches!(err, Error::ServiceUnavailable { .. }));
+
+        let _ = db.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    // TODO: This test should be updated when the correct handling
+    // of this out-of-space case is implemented.
+    #[tokio::test]
+    async fn test_region_allocation_out_of_space_does_not_fail_yet() {
+        let logctx = dev::test_setup_log(
+            "test_region_allocation_out_of_space_does_not_fail_yet",
+        );
+        let mut db = test_setup_database(&logctx.log).await;
+        let cfg = db::Config { url: db.pg_config().clone() };
+        let pool = db::Pool::new(&cfg);
+        let datastore = Arc::new(DataStore::new(Arc::new(pool)));
+        let opctx =
+            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
+
+        // Create a sled...
+        let sled_id = create_test_sled(&datastore).await;
+
+        // ... and a zpool within that sled...
+        let zpool_id = create_test_zpool(&datastore, sled_id).await;
+
+        // ... and datasets within that zpool.
+        let dataset_count = REGION_REDUNDANCY_THRESHOLD;
+        let bogus_addr =
+            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
+        let dataset_ids: Vec<Uuid> =
+            (0..dataset_count).map(|_| Uuid::new_v4()).collect();
+        for id in &dataset_ids {
+            let dataset =
+                Dataset::new(*id, zpool_id, bogus_addr, DatasetKind::Crucible);
+            datastore.dataset_upsert(dataset).await.unwrap();
+        }
+
+        // Allocate regions from the datasets for this disk.
+        //
+        // Note that we ask for a disk which is as large as the zpool,
+        // so we shouldn't have space for redundancy.
+        let disk_size = test_zpool_size();
+        let params = create_test_disk_create_params("disk1", disk_size);
+        let volume1_id = Uuid::new_v4();
+
+        // NOTE: This *should* be an error, rather than succeeding.
+        datastore.region_allocate(&opctx, volume1_id, &params).await.unwrap();
+
+        let _ = db.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    // Validate that queries which should be executable without a full table
+    // scan are, in fact, runnable without a FULL SCAN.
+    #[tokio::test]
+    async fn test_queries_do_not_require_full_table_scan() {
+        use omicron_common::api::external;
+        let logctx =
+            dev::test_setup_log("test_queries_do_not_require_full_table_scan");
+        let mut db = test_setup_database(&logctx.log).await;
+        let cfg = db::Config { url: db.pg_config().clone() };
+        let pool = db::Pool::new(&cfg);
+        let datastore = DataStore::new(Arc::new(pool));
+
+        let explanation = DataStore::get_allocated_regions_query(Uuid::nil())
+            .explain_async(datastore.pool())
+            .await
+            .unwrap();
+        assert!(
+            !explanation.contains("FULL SCAN"),
+            "Found an unexpected FULL SCAN: {}",
+            explanation
+        );
+
+        let explanation = DataStore::get_allocatable_datasets_query()
+            .explain_async(datastore.pool())
+            .await
+            .unwrap();
+        assert!(
+            !explanation.contains("FULL SCAN"),
+            "Found an unexpected FULL SCAN: {}",
+            explanation
+        );
+
+        let subnet = db::model::VpcSubnet::new(
+            Uuid::nil(),
+            Uuid::nil(),
+            external::IdentityMetadataCreateParams {
+                name: external::Name::try_from(String::from("name")).unwrap(),
+                description: String::from("description"),
+            },
+            external::Ipv4Net("172.30.0.0/22".parse().unwrap()),
+            external::Ipv6Net("fd00::/64".parse().unwrap()),
+        );
+        let values = FilterConflictingVpcSubnetRangesQuery::new(subnet);
+        let query =
+            diesel::insert_into(db::schema::vpc_subnet::dsl::vpc_subnet)
+                .values(values)
+                .returning(VpcSubnet::as_returning());
+        println!("{}", diesel::debug_query(&query));
+        let explanation = query.explain_async(datastore.pool()).await.unwrap();
+        assert!(
+            !explanation.contains("FULL SCAN"),
+            "Found an unexpected FULL SCAN: {}",
+            explanation,
+        );
+
+        let _ = db.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    // Test sled-specific IPv6 address allocation
+    #[tokio::test]
+    async fn test_sled_ipv6_address_allocation() {
+        use omicron_common::address::RSS_RESERVED_ADDRESSES as STATIC_IPV6_ADDRESS_OFFSET;
+        use std::net::Ipv6Addr;
+
+        let logctx = dev::test_setup_log("test_sled_ipv6_address_allocation");
+        let mut db = test_setup_database(&logctx.log).await;
+        let cfg = db::Config { url: db.pg_config().clone() };
+        let pool = Arc::new(db::Pool::new(&cfg));
+        let datastore = Arc::new(DataStore::new(Arc::clone(&pool)));
+        let opctx =
+            OpContext::for_tests(logctx.log.new(o!()), datastore.clone());
+
+        let rack_id = Uuid::new_v4();
+        let addr1 = "[fd00:1de::1]:12345".parse().unwrap();
+        let sled1_id = "0de4b299-e0b4-46f0-d528-85de81a7095f".parse().unwrap();
+        let sled1 = db::model::Sled::new(sled1_id, addr1, rack_id);
+        datastore.sled_upsert(sled1).await.unwrap();
+
+        let addr2 = "[fd00:1df::1]:12345".parse().unwrap();
+        let sled2_id = "66285c18-0c79-43e0-e54f-95271f271314".parse().unwrap();
+        let sled2 = db::model::Sled::new(sled2_id, addr2, rack_id);
+        datastore.sled_upsert(sled2).await.unwrap();
+
+        let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap();
+        let expected_ip = Ipv6Addr::new(
+            0xfd00,
+            0x1de,
+            0,
+            0,
+            0,
+            0,
+            0,
+            2 + STATIC_IPV6_ADDRESS_OFFSET,
+        );
+        assert_eq!(ip, expected_ip);
+        let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap();
+        let expected_ip = Ipv6Addr::new(
+            0xfd00,
+            0x1de,
+            0,
+            0,
+            0,
+            0,
+            0,
+            3 + STATIC_IPV6_ADDRESS_OFFSET,
+        );
+        assert_eq!(ip, expected_ip);
+
+        let ip = datastore.next_ipv6_address(&opctx, sled2_id).await.unwrap();
+        let expected_ip = Ipv6Addr::new(
+            0xfd00,
+            0x1df,
+            0,
+            0,
+            0,
+            0,
+            0,
+            2 + STATIC_IPV6_ADDRESS_OFFSET,
+        );
+        assert_eq!(ip, expected_ip);
+
+        let _ = db.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
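+    // A sketch of the arithmetic behind the assertions above: each sled
+    // hands out addresses from the /64 implied by its underlay address
+    // (fd00:1de::/64 for sled1), skipping the first `RSS_RESERVED_ADDRESSES`
+    // entries. The allocator's counter evidently starts two past that
+    // reserved block, so successive calls for one sled yield
+    // `<prefix>::<OFFSET + 2>`, `<prefix>::<OFFSET + 3>`, and so on, with
+    // each sled tracking its own counter.
+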
+    #[tokio::test]
+    async fn test_ssh_keys() {
+        let logctx = dev::test_setup_log("test_ssh_keys");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // Create a new Silo user so that we can look up their keys.
+        let silo_user_id = Uuid::new_v4();
+        datastore
+            .silo_user_create(SiloUser::new(
+                *SILO_ID,
+                silo_user_id,
+                "external@id".into(),
+            ))
+            .await
+            .unwrap();
+
+        let (.., authz_user) = LookupPath::new(&opctx, &datastore)
+            .silo_user_id(silo_user_id)
+            .lookup_for(authz::Action::CreateChild)
+            .await
+            .unwrap();
+        assert_eq!(authz_user.id(), silo_user_id);
+
+        // Create a new SSH public key for the new user.
+        let key_name = Name::try_from(String::from("sshkey")).unwrap();
+        let public_key = "ssh-test AAAAAAAAKEY".to_string();
+        let ssh_key = SshKey::new(
+            silo_user_id,
+            params::SshKeyCreate {
+                identity: IdentityMetadataCreateParams {
+                    name: key_name.clone(),
+                    description: "my SSH public key".to_string(),
+                },
+                public_key,
+            },
+        );
+        let created = datastore
+            .ssh_key_create(&opctx, &authz_user, ssh_key.clone())
+            .await
+            .unwrap();
+        assert_eq!(created.silo_user_id, ssh_key.silo_user_id);
+        assert_eq!(created.public_key, ssh_key.public_key);
+
+        // Look up the key we just created.
+        let (authz_silo, authz_silo_user, authz_ssh_key, found) =
+            LookupPath::new(&opctx, &datastore)
+                .silo_user_id(silo_user_id)
+                .ssh_key_name(&key_name.into())
+                .fetch()
+                .await
+                .unwrap();
+        assert_eq!(authz_silo.id(), *SILO_ID);
+        assert_eq!(authz_silo_user.id(), silo_user_id);
+        assert_eq!(found.silo_user_id, ssh_key.silo_user_id);
+        assert_eq!(found.public_key, ssh_key.public_key);
+
+        // Trying to insert the same one again fails.
+        let duplicate = datastore
+            .ssh_key_create(&opctx, &authz_user, ssh_key.clone())
+            .await;
+        assert!(matches!(
+            duplicate,
+            Err(Error::ObjectAlreadyExists { type_name, object_name })
+                if type_name == ResourceType::SshKey
+                    && object_name == "sshkey"
+        ));
+
+        // Delete the key we just created.
+        datastore.ssh_key_delete(&opctx, &authz_ssh_key).await.unwrap();
+
+        // Clean up.
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_service_upsert() {
+        let logctx = dev::test_setup_log("test_service_upsert");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // Create a sled on which the service should exist.
+        let sled_id = create_test_sled(&datastore).await;
+
+        // Create a new service to exist on this sled.
+        let service_id = Uuid::new_v4();
+        let addr = Ipv6Addr::LOCALHOST;
+        let kind = ServiceKind::Nexus;
+
+        let service = Service::new(service_id, sled_id, addr, kind);
+        let result =
+            datastore.service_upsert(&opctx, service.clone()).await.unwrap();
+        assert_eq!(service.id(), result.id());
+        assert_eq!(service.ip, result.ip);
+        assert_eq!(service.kind, result.kind);
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_rack_initialize_is_idempotent() {
+        let logctx = dev::test_setup_log("test_rack_initialize_is_idempotent");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // Create a Rack, insert it into the DB.
+        let rack = Rack::new(Uuid::new_v4());
+        let result = datastore.rack_insert(&opctx, &rack).await.unwrap();
+        assert_eq!(result.id(), rack.id());
+        assert_eq!(result.initialized, false);
+
+        // Re-insert the Rack (check for idempotency).
+        let result = datastore.rack_insert(&opctx, &rack).await.unwrap();
+        assert_eq!(result.id(), rack.id());
+        assert_eq!(result.initialized, false);
+
+        // Initialize the Rack.
+        let result = datastore
+            .rack_set_initialized(&opctx, rack.id(), vec![])
+            .await
+            .unwrap();
+        assert!(result.initialized);
+
+        // Re-initialize the rack (check for idempotency)
+        let result = datastore
+            .rack_set_initialized(&opctx, rack.id(), vec![])
+            .await
+            .unwrap();
+        assert!(result.initialized);
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_table_scan() {
+        let logctx = dev::test_setup_log("test_table_scan");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        let error = datastore.test_try_table_scan(&opctx).await;
+        println!("error from attempted table scan: {:#}", error);
+        match error {
+            Error::InternalError { internal_message } => {
+                assert!(internal_message.contains(
+                    "contains a full table/index scan which is \
+                    explicitly disallowed"
+                ));
+            }
+            error => panic!(
+                "expected internal error with specific message, found {:?}",
+                error
+            ),
+        }
+
+        // Clean up.
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_deallocate_instance_external_ip_is_idempotent() {
+        use crate::db::schema::instance_external_ip::dsl;
+
+        let logctx = dev::test_setup_log(
+            "test_deallocate_instance_external_ip_is_idempotent",
+        );
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // Create a record.
+        let now = Utc::now();
+        let ip = InstanceExternalIp {
+            id: Uuid::new_v4(),
+            time_created: now,
+            time_modified: now,
+            time_deleted: None,
+            ip_pool_id: Uuid::new_v4(),
+            ip_pool_range_id: Uuid::new_v4(),
+            instance_id: Uuid::new_v4(),
+            ip: ipnetwork::IpNetwork::from(IpAddr::from(Ipv4Addr::new(
+                10, 0, 0, 1,
+            ))),
+            first_port: crate::db::model::SqlU16(0),
+            last_port: crate::db::model::SqlU16(10),
+        };
+        diesel::insert_into(dsl::instance_external_ip)
+            .values(ip)
+            .execute_async(datastore.pool())
+            .await
+            .unwrap();
+
+        // Delete it twice, make sure we get the right sentinel return values.
+        let deleted = datastore
+            .deallocate_instance_external_ip(&opctx, ip.id)
+            .await
+            .unwrap();
+        assert!(
+            deleted,
+            "Got unexpected sentinel value back when \
+            deleting external IP the first time"
+        );
+        let deleted = datastore
+            .deallocate_instance_external_ip(&opctx, ip.id)
+            .await
+            .unwrap();
+        assert!(
+            !deleted,
+            "Got unexpected sentinel value back when \
+            deleting external IP the second time"
+        );
+
+        // Deleting a non-existent record fails.
+        assert!(datastore
+            .deallocate_instance_external_ip(&opctx, Uuid::nil())
+            .await
+            .is_err());
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+}
diff --git a/nexus/src/db/datastore/network_interface.rs b/nexus/src/db/datastore/network_interface.rs
new file mode 100644
index 00000000000..ec9da70f576
--- /dev/null
+++ b/nexus/src/db/datastore/network_interface.rs
@@ -0,0 +1,355 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`NetworkInterface`]s.
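+//!
+//! (These cover NIC creation and deletion -- single and bulk -- listing,
+//! updates, and the OPTE-facing view of an instance's NICs that sled
+//! agents consume.)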
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::error::TransactionError;
+use crate::db::model::IncompleteNetworkInterface;
+use crate::db::model::Instance;
+use crate::db::model::Name;
+use crate::db::model::NetworkInterface;
+use crate::db::model::NetworkInterfaceUpdate;
+use crate::db::pagination::paginated;
+use crate::db::queries::network_interface;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::UpdateResult;
+use sled_agent_client::types as sled_client_types;
+
+impl DataStore {
+    /// Create a network interface attached to the provided instance.
+    pub async fn instance_create_network_interface(
+        &self,
+        opctx: &OpContext,
+        authz_subnet: &authz::VpcSubnet,
+        authz_instance: &authz::Instance,
+        interface: IncompleteNetworkInterface,
+    ) -> Result<NetworkInterface, network_interface::InsertError> {
+        opctx
+            .authorize(authz::Action::CreateChild, authz_instance)
+            .await
+            .map_err(network_interface::InsertError::External)?;
+        opctx
+            .authorize(authz::Action::CreateChild, authz_subnet)
+            .await
+            .map_err(network_interface::InsertError::External)?;
+        self.instance_create_network_interface_raw(&opctx, interface).await
+    }
+
+    pub(crate) async fn instance_create_network_interface_raw(
+        &self,
+        opctx: &OpContext,
+        interface: IncompleteNetworkInterface,
+    ) -> Result<NetworkInterface, network_interface::InsertError> {
+        use db::schema::network_interface::dsl;
+        let query = network_interface::InsertQuery::new(interface.clone());
+        diesel::insert_into(dsl::network_interface)
+            .values(query)
+            .returning(NetworkInterface::as_returning())
+            .get_result_async(
+                self.pool_authorized(opctx)
+                    .await
+                    .map_err(network_interface::InsertError::External)?,
+            )
+            .await
+            .map_err(|e| {
+                network_interface::InsertError::from_pool(e, &interface)
+            })
+    }
+
+    /// Delete all network interfaces attached to the given instance.
+    // NOTE: This is mostly useful in the context of sagas, but might be helpful
+    // in other situations, such as moving an instance between VPC Subnets.
+    pub async fn instance_delete_all_network_interfaces(
+        &self,
+        opctx: &OpContext,
+        authz_instance: &authz::Instance,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Modify, authz_instance).await?;
+
+        use db::schema::network_interface::dsl;
+        let now = Utc::now();
+        diesel::update(dsl::network_interface)
+            .filter(dsl::instance_id.eq(authz_instance.id()))
+            .filter(dsl::time_deleted.is_null())
+            .set(dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_instance),
+                )
+            })?;
+        Ok(())
+    }
+
+    /// Delete a `NetworkInterface` attached to a provided instance.
+    ///
+    /// Note that the primary interface for an instance cannot be deleted if
+    /// there are any secondary interfaces.
+    pub async fn instance_delete_network_interface(
+        &self,
+        opctx: &OpContext,
+        authz_instance: &authz::Instance,
+        authz_interface: &authz::NetworkInterface,
+    ) -> Result<(), network_interface::DeleteError> {
+        opctx
+            .authorize(authz::Action::Delete, authz_interface)
+            .await
+            .map_err(network_interface::DeleteError::External)?;
+        let query = network_interface::DeleteQuery::new(
+            authz_instance.id(),
+            authz_interface.id(),
+        );
+        query
+            .clone()
+            .execute_async(
+                self.pool_authorized(opctx)
+                    .await
+                    .map_err(network_interface::DeleteError::External)?,
+            )
+            .await
+            .map_err(|e| {
+                network_interface::DeleteError::from_pool(e, &query)
+            })?;
+        Ok(())
+    }
+
+    /// Return the information about an instance's network interfaces required
+    /// for the sled agent to instantiate them via OPTE.
+    ///
+    /// OPTE requires information that's currently split across the network
+    /// interface and VPC subnet tables. This query just joins those for each
+    /// NIC in the given instance.
+    pub(crate) async fn derive_guest_network_interface_info(
+        &self,
+        opctx: &OpContext,
+        authz_instance: &authz::Instance,
+    ) -> ListResultVec<sled_client_types::NetworkInterface> {
+        opctx.authorize(authz::Action::ListChildren, authz_instance).await?;
+
+        use db::schema::network_interface;
+        use db::schema::vpc;
+        use db::schema::vpc_subnet;
+
+        // The record type for the results of the below JOIN query
+        #[derive(Debug, diesel::Queryable)]
+        struct NicInfo {
+            name: db::model::Name,
+            ip: ipnetwork::IpNetwork,
+            mac: db::model::MacAddr,
+            ipv4_block: db::model::Ipv4Net,
+            ipv6_block: db::model::Ipv6Net,
+            vni: db::model::Vni,
+            primary: bool,
+            slot: i16,
+        }
+
+        impl From<NicInfo> for sled_client_types::NetworkInterface {
+            fn from(nic: NicInfo) -> sled_client_types::NetworkInterface {
+                let ip_subnet = if nic.ip.is_ipv4() {
+                    external::IpNet::V4(nic.ipv4_block.0)
+                } else {
+                    external::IpNet::V6(nic.ipv6_block.0)
+                };
+                sled_client_types::NetworkInterface {
+                    name: sled_client_types::Name::from(&nic.name.0),
+                    ip: nic.ip.ip(),
+                    mac: sled_client_types::MacAddr::from(nic.mac.0),
+                    subnet: sled_client_types::IpNet::from(ip_subnet),
+                    vni: sled_client_types::Vni::from(nic.vni.0),
+                    primary: nic.primary,
+                    slot: u8::try_from(nic.slot).unwrap(),
+                }
+            }
+        }
+
+        let rows = network_interface::table
+            .filter(network_interface::instance_id.eq(authz_instance.id()))
+            .filter(network_interface::time_deleted.is_null())
+            .inner_join(
+                vpc_subnet::table
+                    .on(network_interface::subnet_id.eq(vpc_subnet::id)),
+            )
+            .inner_join(vpc::table.on(vpc_subnet::vpc_id.eq(vpc::id)))
+            .order_by(network_interface::slot)
+            // TODO-cleanup: Having to specify each column again is less than
+            // ideal, but we can't derive `Selectable` since this is the result
+            // of a JOIN and not from a single table. DRY this out if possible.
+            .select((
+                network_interface::name,
+                network_interface::ip,
+                network_interface::mac,
+                vpc_subnet::ipv4_block,
+                vpc_subnet::ipv6_block,
+                vpc::vni,
+                network_interface::is_primary,
+                network_interface::slot,
+            ))
+            .get_results_async::<NicInfo>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        Ok(rows
+            .into_iter()
+            .map(sled_client_types::NetworkInterface::from)
+            .collect())
+    }
+
+    /// List network interfaces associated with a given instance.
+    pub async fn instance_list_network_interfaces(
+        &self,
+        opctx: &OpContext,
+        authz_instance: &authz::Instance,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<NetworkInterface> {
+        opctx.authorize(authz::Action::ListChildren, authz_instance).await?;
+
+        use db::schema::network_interface::dsl;
+        paginated(dsl::network_interface, dsl::name, &pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::instance_id.eq(authz_instance.id()))
+            .select(NetworkInterface::as_select())
+            .load_async::<NetworkInterface>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Update a network interface associated with a given instance.
+    pub async fn instance_update_network_interface(
+        &self,
+        opctx: &OpContext,
+        authz_instance: &authz::Instance,
+        authz_interface: &authz::NetworkInterface,
+        updates: NetworkInterfaceUpdate,
+    ) -> UpdateResult<NetworkInterface> {
+        use crate::db::schema::network_interface::dsl;
+
+        // This database operation is surprisingly subtle. It's possible to
+        // express this in a single query, with multiple common-table
+        // expressions for the updated rows. For example, if we're setting a new
+        // primary interface, we need to set the `is_primary` column to false
+        // for the current primary, and then set it to true, along with any
+        // other updates, for the new primary.
+        //
+        // That's feasible, but there's a CRDB bug that affects some queries
+        // with multiple update statements. It's possible that this query isn't
+        // in that bucket, but we'll still avoid it for now. Instead, we'll bite
+        // the bullet and use a transaction.
+        //
+        // See https://github.com/oxidecomputer/omicron/issues/1204 for the
+        // issue tracking the work to move this into a CTE.
+
+        // Build up some of the queries first, outside the transaction.
+        //
+        // This selects the existing primary interface.
+        let instance_id = authz_instance.id();
+        let interface_id = authz_interface.id();
+        let find_primary_query = dsl::network_interface
+            .filter(dsl::instance_id.eq(instance_id))
+            .filter(dsl::is_primary.eq(true))
+            .filter(dsl::time_deleted.is_null())
+            .select(NetworkInterface::as_select());
+
+        // This returns the state of the associated instance.
+        let instance_query = db::schema::instance::dsl::instance
+            .filter(db::schema::instance::dsl::id.eq(instance_id))
+            .filter(db::schema::instance::dsl::time_deleted.is_null())
+            .select(Instance::as_select());
+        let stopped =
+            db::model::InstanceState::new(external::InstanceState::Stopped);
+
+        // This is the actual query to update the target interface.
+        let make_primary = matches!(updates.make_primary, Some(true));
+        let update_target_query = diesel::update(dsl::network_interface)
+            .filter(dsl::id.eq(interface_id))
+            .filter(dsl::time_deleted.is_null())
+            .set(updates)
+            .returning(NetworkInterface::as_returning());
+
+        // Errors returned from the below transactions.
+        #[derive(Debug)]
+        enum NetworkInterfaceUpdateError {
+            InstanceNotStopped,
+            FailedToUnsetPrimary(diesel::result::Error),
+        }
+        type TxnError = TransactionError<NetworkInterfaceUpdateError>;
+
+        let pool = self.pool_authorized(opctx).await?;
+        if make_primary {
+            pool.transaction(move |conn| {
+                let instance_state =
+                    instance_query.get_result(conn)?.runtime_state.state;
+                if instance_state != stopped {
+                    return Err(TxnError::CustomError(
+                        NetworkInterfaceUpdateError::InstanceNotStopped,
+                    ));
+                }
+
+                // First, get the primary interface
+                let primary_interface = find_primary_query.get_result(conn)?;
+
+                // If the target and primary are different, we need to toggle
+                // the primary into a secondary.
+                if primary_interface.identity.id != interface_id {
+                    if let Err(e) = diesel::update(dsl::network_interface)
+                        .filter(dsl::id.eq(primary_interface.identity.id))
+                        .filter(dsl::time_deleted.is_null())
+                        .set(dsl::is_primary.eq(false))
+                        .execute(conn)
+                    {
+                        return Err(TxnError::CustomError(
+                            NetworkInterfaceUpdateError::FailedToUnsetPrimary(
+                                e,
+                            ),
+                        ));
+                    }
+                }
+
+                // In any case, update the actual target
+                Ok(update_target_query.get_result(conn)?)
+            })
+        } else {
+            // In this case, we can just directly apply the updates. By
+            // construction, `updates.make_primary` is `None`, so nothing will
+            // be done there. The other columns always need to be updated, and
+            // we're only hitting a single row. Note that we still need to
+            // verify the instance is stopped.
+            pool.transaction(move |conn| {
+                let instance_state =
+                    instance_query.get_result(conn)?.runtime_state.state;
+                if instance_state != stopped {
+                    return Err(TxnError::CustomError(
+                        NetworkInterfaceUpdateError::InstanceNotStopped,
+                    ));
+                }
+                Ok(update_target_query.get_result(conn)?)
+            })
+        }
+        .await
+        .map_err(|e| match e {
+            TxnError::CustomError(
+                NetworkInterfaceUpdateError::InstanceNotStopped,
+            ) => Error::invalid_request(
+                "Instance must be stopped to update its network interfaces",
+            ),
+            _ => Error::internal_error(&format!("Transaction error: {:?}", e)),
+        })
+    }
+}
diff --git a/nexus/src/db/datastore/organization.rs b/nexus/src/db/datastore/organization.rs
new file mode 100644
index 00000000000..e237aa63819
--- /dev/null
+++ b/nexus/src/db/datastore/organization.rs
@@ -0,0 +1,197 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Organization`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::collection_insert::AsyncInsertError;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::error::diesel_pool_result_optional;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Resource;
+use crate::db::model::Name;
+use crate::db::model::Organization;
+use crate::db::model::OrganizationUpdate;
+use crate::db::model::Silo;
+use crate::db::pagination::paginated;
+use crate::external_api::params;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::InternalContext;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+use omicron_common::api::external::UpdateResult;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Create an organization
+    pub async fn organization_create(
+        &self,
+        opctx: &OpContext,
+        organization: &params::OrganizationCreate,
+    ) -> CreateResult<Organization> {
+        let authz_silo = opctx
+            .authn
+            .silo_required()
+            .internal_context("creating an Organization")?;
+        opctx.authorize(authz::Action::CreateChild, &authz_silo).await?;
+
+        use db::schema::organization::dsl;
+        let silo_id = authz_silo.id();
+        let organization = Organization::new(organization.clone(), silo_id);
+        let name = organization.name().as_str().to_string();
+
+        Silo::insert_resource(
+            silo_id,
+            diesel::insert_into(dsl::organization).values(organization),
+        )
+        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| match e {
+            AsyncInsertError::CollectionNotFound => Error::InternalError {
+                internal_message: format!(
+                    "attempting to create an \
+                    organization under non-existent silo {}",
+                    silo_id
+                ),
+            },
+            AsyncInsertError::DatabaseError(e) => {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::Organization, &name),
+                )
+            }
+        })
+    }
+
+    /// Delete an organization
+    pub async fn organization_delete(
+        &self,
+        opctx: &OpContext,
+        authz_org: &authz::Organization,
+        db_org: &db::model::Organization,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_org).await?;
+
+        use db::schema::organization::dsl;
+        use db::schema::project;
+
+        // Make sure there are no projects present within this organization.
+        let project_found = diesel_pool_result_optional(
+            project::dsl::project
+                .filter(project::dsl::organization_id.eq(authz_org.id()))
+                .filter(project::dsl::time_deleted.is_null())
+                .select(project::dsl::id)
+                .limit(1)
+                .first_async::<Uuid>(self.pool_authorized(opctx).await?)
+                .await,
+        )
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
+        if project_found.is_some() {
+            return Err(Error::InvalidRequest {
+                message: "organization to be deleted contains a project"
+                    .to_string(),
+            });
+        }
+
+        let now = Utc::now();
+        let updated_rows = diesel::update(dsl::organization)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_org.id()))
+            .filter(dsl::rcgen.eq(db_org.rcgen))
+            .set(dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_org),
+                )
+            })?;
+
+        if updated_rows == 0 {
+            return Err(Error::InvalidRequest {
+                message: "deletion failed due to concurrent modification"
+                    .to_string(),
+            });
+        }
+        Ok(())
+    }
+
+    pub async fn organizations_list_by_id(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<Organization> {
+        let authz_silo = opctx
+            .authn
+            .silo_required()
+            .internal_context("listing Organizations")?;
+        opctx.authorize(authz::Action::ListChildren, &authz_silo).await?;
+
+        use db::schema::organization::dsl;
+        paginated(dsl::organization, dsl::id, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::silo_id.eq(authz_silo.id()))
+            .select(Organization::as_select())
+            .load_async::<Organization>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn organizations_list_by_name(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<Organization> {
+        let authz_silo = opctx
+            .authn
+            .silo_required()
+            .internal_context("listing Organizations")?;
+        opctx.authorize(authz::Action::ListChildren, &authz_silo).await?;
+
+        use db::schema::organization::dsl;
+        paginated(dsl::organization, dsl::name, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::silo_id.eq(authz_silo.id()))
+            .select(Organization::as_select())
+            .load_async::<Organization>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Updates an organization by name (clobbering update -- no etag)
+    pub async fn organization_update(
+        &self,
+        opctx: &OpContext,
+        authz_org: &authz::Organization,
+        updates: OrganizationUpdate,
+    ) -> UpdateResult<Organization> {
+        use db::schema::organization::dsl;
+
+        opctx.authorize(authz::Action::Modify, authz_org).await?;
+        diesel::update(dsl::organization)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_org.id()))
+            .set(updates)
+            .returning(Organization::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_org),
+                )
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/oximeter.rs b/nexus/src/db/datastore/oximeter.rs
new file mode 100644
index 00000000000..178c2466a72
--- /dev/null
+++ b/nexus/src/db/datastore/oximeter.rs
@@ -0,0 +1,126 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to Oximeter.
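+//!
+//! (Oximeter is the control plane's metric-collection service; these
+//! methods record collector instances and the metric producers assigned
+//! to them.)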
+
+use super::DataStore;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::model::OximeterInfo;
+use crate::db::model::ProducerEndpoint;
+use crate::db::pagination::paginated;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    // Create a record for a new Oximeter instance
+    pub async fn oximeter_create(
+        &self,
+        info: &OximeterInfo,
+    ) -> Result<(), Error> {
+        use db::schema::oximeter::dsl;
+
+        // If we get a conflict on the Oximeter ID, this means that the
+        // collector instance was previously registered, and it's
+        // re-registering due to something like a service restart. In this
+        // case, we update the time modified and the service address, rather
+        // than propagating a constraint violation to the caller.
+        diesel::insert_into(dsl::oximeter)
+            .values(*info)
+            .on_conflict(dsl::id)
+            .do_update()
+            .set((
+                dsl::time_modified.eq(Utc::now()),
+                dsl::ip.eq(info.ip),
+                dsl::port.eq(info.port),
+            ))
+            .execute_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Oximeter,
+                        "Oximeter Info",
+                    ),
+                )
+            })?;
+        Ok(())
+    }
+
+    // List the oximeter collector instances
+    pub async fn oximeter_list(
+        &self,
+        page_params: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<OximeterInfo> {
+        use db::schema::oximeter::dsl;
+        paginated(dsl::oximeter, dsl::id, page_params)
+            .load_async::<OximeterInfo>(self.pool())
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    // Create a record for a new producer endpoint
+    pub async fn producer_endpoint_create(
+        &self,
+        producer: &ProducerEndpoint,
+    ) -> Result<(), Error> {
+        use db::schema::metric_producer::dsl;
+
+        // TODO: see https://github.com/oxidecomputer/omicron/issues/323
+        diesel::insert_into(dsl::metric_producer)
+            .values(producer.clone())
+            .on_conflict(dsl::id)
+            .do_update()
+            .set((
+                dsl::time_modified.eq(Utc::now()),
+                dsl::ip.eq(producer.ip),
+                dsl::port.eq(producer.port),
+                dsl::interval.eq(producer.interval),
+                dsl::base_route.eq(producer.base_route.clone()),
+            ))
+            .execute_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::MetricProducer,
+                        "Producer Endpoint",
+                    ),
+                )
+            })?;
+        Ok(())
+    }
+
+    // List the producer endpoint records by the oximeter instance to which
+    // they're assigned.
+    pub async fn producers_list_by_oximeter_id(
+        &self,
+        oximeter_id: Uuid,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<ProducerEndpoint> {
+        use db::schema::metric_producer::dsl;
+        paginated(dsl::metric_producer, dsl::id, &pagparams)
+            .filter(dsl::oximeter_id.eq(oximeter_id))
+            .order_by((dsl::oximeter_id, dsl::id))
+            .select(ProducerEndpoint::as_select())
+            .load_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::MetricProducer,
+                        "By Oximeter ID",
+                    ),
+                )
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/project.rs b/nexus/src/db/datastore/project.rs
new file mode 100644
index 00000000000..b4063d7ad49
--- /dev/null
+++ b/nexus/src/db/datastore/project.rs
@@ -0,0 +1,160 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Project`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::collection_insert::AsyncInsertError;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Resource;
+use crate::db::model::Name;
+use crate::db::model::Organization;
+use crate::db::model::Project;
+use crate::db::model::ProjectUpdate;
+use crate::db::pagination::paginated;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use omicron_common::api::external::UpdateResult;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Create a project
+    pub async fn project_create(
+        &self,
+        opctx: &OpContext,
+        org: &authz::Organization,
+        project: Project,
+    ) -> CreateResult<Project> {
+        use db::schema::project::dsl;
+
+        opctx.authorize(authz::Action::CreateChild, org).await?;
+
+        let name = project.name().as_str().to_string();
+        let organization_id = project.organization_id;
+        Organization::insert_resource(
+            organization_id,
+            diesel::insert_into(dsl::project).values(project),
+        )
+        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| match e {
+            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
+                type_name: ResourceType::Organization,
+                lookup_type: LookupType::ById(organization_id),
+            },
+            AsyncInsertError::DatabaseError(e) => {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::Project, &name),
+                )
+            }
+        })
+    }
+
+    /// Delete a project
+    // TODO-correctness This needs to check whether there are any resources that
+    // depend on the Project (Disks, Instances). We can do this with a
+    // generation counter that gets bumped when these resources are created.
+    pub async fn project_delete(
+        &self,
+        opctx: &OpContext,
+        authz_project: &authz::Project,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_project).await?;
+
+        use db::schema::project::dsl;
+
+        let now = Utc::now();
+        diesel::update(dsl::project)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_project.id()))
+            .set(dsl::time_deleted.eq(now))
+            .returning(Project::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_project),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn projects_list_by_id(
+        &self,
+        opctx: &OpContext,
+        authz_org: &authz::Organization,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<Project> {
+        use db::schema::project::dsl;
+
+        opctx.authorize(authz::Action::ListChildren, authz_org).await?;
+
+        paginated(dsl::project, dsl::id, pagparams)
+            .filter(dsl::organization_id.eq(authz_org.id()))
+            .filter(dsl::time_deleted.is_null())
+            .select(Project::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn projects_list_by_name(
+        &self,
+        opctx: &OpContext,
+        authz_org: &authz::Organization,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<Project> {
+        use db::schema::project::dsl;
+
+        opctx.authorize(authz::Action::ListChildren, authz_org).await?;
+
+        paginated(dsl::project, dsl::name, &pagparams)
+            .filter(dsl::organization_id.eq(authz_org.id()))
+            .filter(dsl::time_deleted.is_null())
+            .select(Project::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Updates a project (clobbering update -- no etag)
+    pub async fn project_update(
+        &self,
+        opctx: &OpContext,
+        authz_project: &authz::Project,
+        updates: ProjectUpdate,
+    ) -> UpdateResult<Project> {
+        opctx.authorize(authz::Action::Modify, authz_project).await?;
+
+        use db::schema::project::dsl;
+        diesel::update(dsl::project)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_project.id()))
+            .set(updates)
+            .returning(Project::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_project),
+                )
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/rack.rs b/nexus/src/db/datastore/rack.rs
new file mode 100644
index 00000000000..06d298a4261
--- /dev/null
+++ b/nexus/src/db/datastore/rack.rs
@@ -0,0 +1,187 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Rack`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::collection_insert::SyncInsertError;
+use crate::db::error::public_error_from_diesel_create;
+use crate::db::error::public_error_from_diesel_lookup;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::error::TransactionError;
+use crate::db::identity::Asset;
+use crate::db::model::Rack;
+use crate::db::model::Service;
+use crate::db::model::Sled;
+use crate::db::pagination::paginated;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use diesel::upsert::excluded;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use omicron_common::api::external::UpdateResult;
+use uuid::Uuid;
+
+impl DataStore {
+    pub async fn rack_list(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<Rack> {
+        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
+        use db::schema::rack::dsl;
+        paginated(dsl::rack, dsl::id, pagparams)
+            .select(Rack::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Stores a new rack in the database.
+    ///
+    /// This function is a no-op if the rack already exists.
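+    ///
+    /// (Idempotency comes from the `ON CONFLICT (id) DO UPDATE` below, which
+    /// "updates" a conflicting row's `id` to itself, so the existing row is
+    /// returned unchanged rather than surfacing a uniqueness error.)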
+    pub async fn rack_insert(
+        &self,
+        opctx: &OpContext,
+        rack: &Rack,
+    ) -> Result<Rack, Error> {
+        use db::schema::rack::dsl;
+
+        diesel::insert_into(dsl::rack)
+            .values(rack.clone())
+            .on_conflict(dsl::id)
+            .do_update()
+            // This is a no-op, since we conflicted on the ID.
+            .set(dsl::id.eq(excluded(dsl::id)))
+            .returning(Rack::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Rack,
+                        &rack.id().to_string(),
+                    ),
+                )
+            })
+    }
+
+    /// Update a rack to mark that it has been initialized
+    pub async fn rack_set_initialized(
+        &self,
+        opctx: &OpContext,
+        rack_id: Uuid,
+        services: Vec<Service>,
+    ) -> UpdateResult<Rack> {
+        use db::schema::rack::dsl as rack_dsl;
+        use db::schema::service::dsl as service_dsl;
+
+        #[derive(Debug)]
+        enum RackInitError {
+            ServiceInsert { err: SyncInsertError, sled_id: Uuid, svc_id: Uuid },
+            RackUpdate(diesel::result::Error),
+        }
+        type TxnError = TransactionError<RackInitError>;
+
+        // NOTE: This operation could likely be optimized with a CTE, but given
+        // the low-frequency of calls, this optimization has been deferred.
+        self.pool_authorized(opctx)
+            .await?
+            .transaction(move |conn| {
+                // Early exit if the rack has already been initialized.
+                let rack = rack_dsl::rack
+                    .filter(rack_dsl::id.eq(rack_id))
+                    .select(Rack::as_select())
+                    .get_result(conn)
+                    .map_err(|e| {
+                        TxnError::CustomError(RackInitError::RackUpdate(e))
+                    })?;
+                if rack.initialized {
+                    return Ok(rack);
+                }
+
+                // Otherwise, insert services and set rack.initialized = true.
+                for svc in services {
+                    let sled_id = svc.sled_id;
+                    <Sled as DatastoreCollection<Service>>::insert_resource(
+                        sled_id,
+                        diesel::insert_into(service_dsl::service)
+                            .values(svc.clone())
+                            .on_conflict(service_dsl::id)
+                            .do_update()
+                            .set((
+                                service_dsl::time_modified.eq(Utc::now()),
+                                service_dsl::sled_id
+                                    .eq(excluded(service_dsl::sled_id)),
+                                service_dsl::ip.eq(excluded(service_dsl::ip)),
+                                service_dsl::kind
+                                    .eq(excluded(service_dsl::kind)),
+                            )),
+                    )
+                    .insert_and_get_result(conn)
+                    .map_err(|err| {
+                        TxnError::CustomError(RackInitError::ServiceInsert {
+                            err,
+                            sled_id,
+                            svc_id: svc.id(),
+                        })
+                    })?;
+                }
+                diesel::update(rack_dsl::rack)
+                    .filter(rack_dsl::id.eq(rack_id))
+                    .set((
+                        rack_dsl::initialized.eq(true),
+                        rack_dsl::time_modified.eq(Utc::now()),
+                    ))
+                    .returning(Rack::as_returning())
+                    .get_result::<Rack>(conn)
+                    .map_err(|e| {
+                        TxnError::CustomError(RackInitError::RackUpdate(e))
+                    })
+            })
+            .await
+            .map_err(|e| match e {
+                TxnError::CustomError(RackInitError::ServiceInsert {
+                    err,
+                    sled_id,
+                    svc_id,
+                }) => match err {
+                    SyncInsertError::CollectionNotFound => {
+                        Error::ObjectNotFound {
+                            type_name: ResourceType::Sled,
+                            lookup_type: LookupType::ById(sled_id),
+                        }
+                    }
+                    SyncInsertError::DatabaseError(e) => {
+                        public_error_from_diesel_create(
+                            e,
+                            ResourceType::Service,
+                            &svc_id.to_string(),
+                        )
+                    }
+                },
+                TxnError::CustomError(RackInitError::RackUpdate(err)) => {
+                    public_error_from_diesel_lookup(
+                        err,
+                        ResourceType::Rack,
+                        &LookupType::ById(rack_id),
+                    )
+                }
+                TxnError::Pool(e) => {
+                    Error::internal_error(&format!("Transaction error: {}", e))
+                }
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/region.rs b/nexus/src/db/datastore/region.rs
new file mode 100644
index 00000000000..1f2b51316f0
--- /dev/null
+++ b/nexus/src/db/datastore/region.rs
@@ -0,0 +1,263 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Region`]s.
+
+use super::DataStore;
+use super::RunnableQuery;
+use super::REGION_REDUNDANCY_THRESHOLD;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::error::TransactionError;
+use crate::db::identity::Asset;
+use crate::db::lookup::LookupPath;
+use crate::db::model::Dataset;
+use crate::db::model::Region;
+use crate::external_api::params;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use uuid::Uuid;
+
+impl DataStore {
+    pub(super) fn get_allocated_regions_query(
+        volume_id: Uuid,
+    ) -> impl RunnableQuery<(Dataset, Region)> {
+        use db::schema::dataset::dsl as dataset_dsl;
+        use db::schema::region::dsl as region_dsl;
+        region_dsl::region
+            .filter(region_dsl::volume_id.eq(volume_id))
+            .inner_join(
+                dataset_dsl::dataset
+                    .on(region_dsl::dataset_id.eq(dataset_dsl::id)),
+            )
+            .select((Dataset::as_select(), Region::as_select()))
+    }
+
+    /// Gets allocated regions for a disk, and the datasets to which those
+    /// regions belong.
+    ///
+    /// Note that this function does not validate liveness of the Disk, so it
+    /// may be used in a context where the disk is being deleted.
+    pub async fn get_allocated_regions(
+        &self,
+        volume_id: Uuid,
+    ) -> Result<Vec<(Dataset, Region)>, Error> {
+        Self::get_allocated_regions_query(volume_id)
+            .get_results_async::<(Dataset, Region)>(self.pool())
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    async fn get_block_size_from_disk_create(
+        &self,
+        opctx: &OpContext,
+        disk_create: &params::DiskCreate,
+    ) -> Result<db::model::BlockSize, Error> {
+        match &disk_create.disk_source {
+            params::DiskSource::Blank { block_size } => {
+                Ok(db::model::BlockSize::try_from(*block_size)
+                    .map_err(|e| Error::invalid_request(&e.to_string()))?)
+            }
+            params::DiskSource::Snapshot { snapshot_id: _ } => {
+                // Until we implement snapshots, do not allow disks to be
+                // created from a snapshot.
+                return Err(Error::InvalidValue {
+                    label: String::from("snapshot"),
+                    message: String::from("snapshots are not yet supported"),
+                });
+            }
+            params::DiskSource::Image { image_id: _ } => {
+                // Until we implement project images, do not allow disks to be
+                // created from a project image.
+                return Err(Error::InvalidValue {
+                    label: String::from("image"),
+                    message: String::from(
+                        "project images are not yet supported",
+                    ),
+                });
+            }
+            params::DiskSource::GlobalImage { image_id } => {
+                let (.., db_global_image) = LookupPath::new(opctx, &self)
+                    .global_image_id(*image_id)
+                    .fetch()
+                    .await?;
+
+                Ok(db_global_image.block_size)
+            }
+        }
+    }
+
+    /// Idempotently allocates enough regions to back a disk.
+    ///
+    /// Returns the allocated regions, as well as the datasets to which they
+    /// belong.
+    pub async fn region_allocate(
+        &self,
+        opctx: &OpContext,
+        volume_id: Uuid,
+        params: &params::DiskCreate,
+    ) -> Result<Vec<(Dataset, Region)>, Error> {
+        use db::schema::dataset::dsl as dataset_dsl;
+        use db::schema::region::dsl as region_dsl;
+
+        // ALLOCATION POLICY
+        //
+        // NOTE: This policy can - and should! - be changed.
+        //
+        // See https://rfd.shared.oxide.computer/rfd/0205 for a more
+        // complete discussion.
+        //
+        // It is currently acting as a placeholder, showing a feasible
+        // interaction between datasets and regions.
+        //
+        // This policy allocates regions to distinct Crucible datasets,
+        // favoring datasets with the smallest existing (summed) region
+        // sizes. Basically, "pick the datasets with the smallest load first".
+        //
+        // Longer-term, we should consider:
+        // - Storage size + remaining free space
+        // - Sled placement of datasets
+        // - What sort of loads we'd like to create (even split across all disks
+        //   may not be preferable, especially if maintenance is expected)
+        #[derive(Debug, thiserror::Error)]
+        enum RegionAllocateError {
+            #[error("Not enough datasets for replicated allocation: {0}")]
+            NotEnoughDatasets(usize),
+        }
+        type TxnError = TransactionError<RegionAllocateError>;
+
+        let params: params::DiskCreate = params.clone();
+        let block_size =
+            self.get_block_size_from_disk_create(opctx, &params).await?;
+        let blocks_per_extent =
+            params.extent_size() / block_size.to_bytes() as i64;
+
+        self.pool()
+            .transaction(move |conn| {
+                // First, for idempotency, check if regions are already
+                // allocated to this disk.
+                //
+                // If they are, return those regions and the associated
+                // datasets.
+                let datasets_and_regions =
+                    Self::get_allocated_regions_query(volume_id)
+                        .get_results::<(Dataset, Region)>(conn)?;
+                if !datasets_and_regions.is_empty() {
+                    return Ok(datasets_and_regions);
+                }
+
+                let mut datasets: Vec<Dataset> =
+                    Self::get_allocatable_datasets_query()
+                        .get_results::<Dataset>(conn)?;
+
+                if datasets.len() < REGION_REDUNDANCY_THRESHOLD {
+                    return Err(TxnError::CustomError(
+                        RegionAllocateError::NotEnoughDatasets(datasets.len()),
+                    ));
+                }
+
+                // Create identical regions on each of the following datasets.
+                let source_datasets =
+                    &mut datasets[0..REGION_REDUNDANCY_THRESHOLD];
+                let regions: Vec<Region> = source_datasets
+                    .iter()
+                    .map(|dataset| {
+                        Region::new(
+                            dataset.id(),
+                            volume_id,
+                            block_size.into(),
+                            blocks_per_extent,
+                            params.extent_count(),
+                        )
+                    })
+                    .collect();
+                let regions = diesel::insert_into(region_dsl::region)
+                    .values(regions)
+                    .returning(Region::as_returning())
+                    .get_results(conn)?;
+
+                // Update the tallied sizes in the source datasets containing
+                // those regions.
+                let region_size = i64::from(block_size.to_bytes())
+                    * blocks_per_extent
+                    * params.extent_count();
+                for dataset in source_datasets.iter_mut() {
+                    dataset.size_used =
+                        dataset.size_used.map(|v| v + region_size);
+                }
+
+                let dataset_ids: Vec<Uuid> =
+                    source_datasets.iter().map(|ds| ds.id()).collect();
+                diesel::update(dataset_dsl::dataset)
+                    .filter(dataset_dsl::id.eq_any(dataset_ids))
+                    .set(
+                        dataset_dsl::size_used
+                            .eq(dataset_dsl::size_used + region_size),
+                    )
+                    .execute(conn)?;
+
+                // Return the regions with the datasets to which they were
+                // allocated.
+                Ok(source_datasets
+                    .into_iter()
+                    .map(|d| d.clone())
+                    .zip(regions)
+                    .collect())
+            })
+            .await
+            .map_err(|e| match e {
+                TxnError::CustomError(
+                    RegionAllocateError::NotEnoughDatasets(_),
+                ) => Error::unavail("Not enough datasets to allocate disks"),
+                _ => {
+                    Error::internal_error(&format!("Transaction error: {}", e))
+                }
+            })
+    }
+
+    /// Deletes all regions backing a disk.
+    ///
+    /// Also updates the storage usage on their corresponding datasets.
+    pub async fn regions_hard_delete(&self, volume_id: Uuid) -> DeleteResult {
+        use db::schema::dataset::dsl as dataset_dsl;
+        use db::schema::region::dsl as region_dsl;
+
+        // Remove the regions, collecting datasets they're from.
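+        // (Each deleted region row yields its dataset ID together with the
+        // region's size in bytes -- block_size * blocks_per_extent *
+        // extent_count -- which is then subtracted from the owning dataset's
+        // `size_used` below.)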
+        let (dataset_id, size) = diesel::delete(region_dsl::region)
+            .filter(region_dsl::volume_id.eq(volume_id))
+            .returning((
+                region_dsl::dataset_id,
+                region_dsl::block_size
+                    * region_dsl::blocks_per_extent
+                    * region_dsl::extent_count,
+            ))
+            .get_result_async::<(Uuid, i64)>(self.pool())
+            .await
+            .map_err(|e| {
+                Error::internal_error(&format!(
+                    "error deleting regions: {:?}",
+                    e
+                ))
+            })?;
+
+        // Update those datasets to which the regions belonged.
+        diesel::update(dataset_dsl::dataset)
+            .filter(dataset_dsl::id.eq(dataset_id))
+            .set(dataset_dsl::size_used.eq(dataset_dsl::size_used - size))
+            .execute_async(self.pool())
+            .await
+            .map_err(|e| {
+                Error::internal_error(&format!(
+                    "error updating dataset space: {:?}",
+                    e
+                ))
+            })?;
+
+        Ok(())
+    }
+}
diff --git a/nexus/src/db/datastore/role.rs b/nexus/src/db/datastore/role.rs
new file mode 100644
index 00000000000..08a2e6cd17d
--- /dev/null
+++ b/nexus/src/db/datastore/role.rs
@@ -0,0 +1,264 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to roles.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS;
+use crate::db::fixed_data::role_builtin::BUILTIN_ROLES;
+use crate::db::model::DatabaseString;
+use crate::db::model::IdentityType;
+use crate::db::model::RoleAssignment;
+use crate::db::model::RoleBuiltin;
+use crate::db::pagination::paginated_multicolumn;
+use crate::external_api::shared;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+use omicron_common::bail_unless;
+use uuid::Uuid;
+
+impl DataStore {
+    /// List built-in roles
+    pub async fn roles_builtin_list_by_name(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, (String, String)>,
+    ) -> ListResultVec<RoleBuiltin> {
+        use db::schema::role_builtin::dsl;
+        opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
+        paginated_multicolumn(
+            dsl::role_builtin,
+            (dsl::resource_type, dsl::role_name),
+            pagparams,
+        )
+        .select(RoleBuiltin::as_select())
+        .load_async::<RoleBuiltin>(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Load built-in roles into the database
+    pub async fn load_builtin_roles(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        use db::schema::role_builtin::dsl;
+
+        opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;
+
+        let builtin_roles = BUILTIN_ROLES
+            .iter()
+            .map(|role_config| {
+                RoleBuiltin::new(
+                    role_config.resource_type,
+                    &role_config.role_name,
+                    &role_config.description,
+                )
+            })
+            .collect::<Vec<RoleBuiltin>>();
+
+        debug!(opctx.log, "attempting to create built-in roles");
+        let count = diesel::insert_into(dsl::role_builtin)
+            .values(builtin_roles)
+            .on_conflict((dsl::resource_type, dsl::role_name))
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
diff --git a/nexus/src/db/datastore/role.rs b/nexus/src/db/datastore/role.rs
new file mode 100644
index 00000000000..08a2e6cd17d
--- /dev/null
+++ b/nexus/src/db/datastore/role.rs
@@ -0,0 +1,264 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to roles.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS;
+use crate::db::fixed_data::role_builtin::BUILTIN_ROLES;
+use crate::db::model::DatabaseString;
+use crate::db::model::IdentityType;
+use crate::db::model::RoleAssignment;
+use crate::db::model::RoleBuiltin;
+use crate::db::pagination::paginated_multicolumn;
+use crate::external_api::shared;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+use omicron_common::bail_unless;
+use uuid::Uuid;
+
+impl DataStore {
+    /// List built-in roles
+    pub async fn roles_builtin_list_by_name(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, (String, String)>,
+    ) -> ListResultVec<RoleBuiltin> {
+        use db::schema::role_builtin::dsl;
+        opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
+        paginated_multicolumn(
+            dsl::role_builtin,
+            (dsl::resource_type, dsl::role_name),
+            pagparams,
+        )
+        .select(RoleBuiltin::as_select())
+        .load_async::<RoleBuiltin>(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Load built-in roles into the database
+    pub async fn load_builtin_roles(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        use db::schema::role_builtin::dsl;
+
+        opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;
+
+        let builtin_roles = BUILTIN_ROLES
+            .iter()
+            .map(|role_config| {
+                RoleBuiltin::new(
+                    role_config.resource_type,
+                    &role_config.role_name,
+                    &role_config.description,
+                )
+            })
+            .collect::<Vec<RoleBuiltin>>();
+
+        debug!(opctx.log, "attempting to create built-in roles");
+        let count = diesel::insert_into(dsl::role_builtin)
+            .values(builtin_roles)
+            .on_conflict((dsl::resource_type, dsl::role_name))
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        info!(opctx.log, "created {} built-in roles", count);
+        Ok(())
+    }
+
+    /// Load role assignments for built-in users and built-in roles into the
+    /// database
+    pub async fn load_builtin_role_asgns(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        use db::schema::role_assignment::dsl;
+
+        opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;
+
+        debug!(opctx.log, "attempting to create built-in role assignments");
+        let count = diesel::insert_into(dsl::role_assignment)
+            .values(&*BUILTIN_ROLE_ASSIGNMENTS)
+            .on_conflict((
+                dsl::identity_type,
+                dsl::identity_id,
+                dsl::resource_type,
+                dsl::resource_id,
+                dsl::role_name,
+            ))
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        info!(opctx.log, "created {} built-in role assignments", count);
+        Ok(())
+    }
+
+    /// Return the built-in roles that the given built-in user has for the given
+    /// resource
+    pub async fn role_asgn_list_for(
+        &self,
+        opctx: &OpContext,
+        identity_type: IdentityType,
+        identity_id: Uuid,
+        resource_type: ResourceType,
+        resource_id: Uuid,
+    ) -> Result<Vec<RoleAssignment>, Error> {
+        use db::schema::role_assignment::dsl;
+
+        // There is no resource-specific authorization check because all
+        // authenticated users need to be able to list their own roles --
+        // otherwise we can't do any authorization checks.
+        // TODO-security rethink this -- how do we know the user is looking up
+        // their own roles? Maybe this should use an internal authz context.
+
+        // TODO-scalability TODO-security This needs to be paginated. It's not
+        // exposed via an external API right now but someone could still put us
+        // into some hurt by assigning loads of roles to someone and having that
+        // person attempt to access anything.
+        dsl::role_assignment
+            .filter(dsl::identity_type.eq(identity_type))
+            .filter(dsl::identity_id.eq(identity_id))
+            .filter(dsl::resource_type.eq(resource_type.to_string()))
+            .filter(dsl::resource_id.eq(resource_id))
+            .select(RoleAssignment::as_select())
+            .load_async::<RoleAssignment>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
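`role_asgn_list_for` filters on the full (identity_type, identity_id, resource_type, resource_id) tuple. A toy in-memory analogue of that lookup, with string and integer fields standing in for the real enum and Uuid types:

```rust
// Toy in-memory analogue of the role-assignment lookup.
struct Assignment {
    identity_type: String,
    identity_id: u32,
    resource_type: String,
    resource_id: u32,
    role_name: String,
}

fn roles_for(
    all: &[Assignment],
    identity_type: &str,
    identity_id: u32,
    resource_type: &str,
    resource_id: u32,
) -> Vec<String> {
    all.iter()
        // Every component of the tuple must match, as in the filters above.
        .filter(|a| a.identity_type == identity_type)
        .filter(|a| a.identity_id == identity_id)
        .filter(|a| a.resource_type == resource_type)
        .filter(|a| a.resource_id == resource_id)
        .map(|a| a.role_name.clone())
        .collect()
}

fn main() {
    let all = vec![Assignment {
        identity_type: "user_builtin".into(),
        identity_id: 1,
        resource_type: "fleet".into(),
        resource_id: 0,
        role_name: "admin".into(),
    }];
    assert_eq!(roles_for(&all, "user_builtin", 1, "fleet", 0), vec!["admin"]);
}
```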
+
+    /// Fetches all of the externally-visible role assignments for the specified
+    /// resource
+    ///
+    /// Role assignments for internal identities (e.g., built-in users) are not
+    /// included in this list.
+    ///
+    /// This function is generic over all resources that can accept roles (e.g.,
+    /// Fleet, Silo, Organization, etc.).
+    // TODO-scalability In an ideal world, this would be paginated. The impact
+    // is mitigated because we cap the number of role assignments per resource
+    // pretty tightly.
+    pub async fn role_assignment_fetch_visible<
+        T: authz::ApiResourceWithRoles + Clone,
+    >(
+        &self,
+        opctx: &OpContext,
+        authz_resource: &T,
+    ) -> ListResultVec<RoleAssignment> {
+        opctx.authorize(authz::Action::ReadPolicy, authz_resource).await?;
+        let resource_type = authz_resource.resource_type();
+        let resource_id = authz_resource.resource_id();
+        use db::schema::role_assignment::dsl;
+        dsl::role_assignment
+            .filter(dsl::resource_type.eq(resource_type.to_string()))
+            .filter(dsl::resource_id.eq(resource_id))
+            .filter(dsl::identity_type.ne(IdentityType::UserBuiltin))
+            .order(dsl::role_name.asc())
+            .then_order_by(dsl::identity_id.asc())
+            .select(RoleAssignment::as_select())
+            .load_async::<RoleAssignment>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Removes all existing externally-visible role assignments on
+    /// `authz_resource` and adds those specified by `new_assignments`
+    ///
+    /// Role assignments for internal identities (e.g., built-in users) are not
+    /// affected.
+    ///
+    /// The expectation is that the caller will have just fetched the role
+    /// assignments, modified them, and is giving us the complete new list.
+    ///
+    /// This function is generic over all resources that can accept roles (e.g.,
+    /// Fleet, Silo, Organization, etc.).
+    // TODO-correctness As with the rest of the API, we're lacking an ability
+    // for an ETag precondition check here.
+    // TODO-scalability In an ideal world, this would update in batches. That's
+    // tricky without first-classing the Policy in the database. The impact is
+    // mitigated because we cap the number of role assignments per resource
+    // pretty tightly.
+    pub async fn role_assignment_replace_visible<T>(
+        &self,
+        opctx: &OpContext,
+        authz_resource: &T,
+        new_assignments: &[shared::RoleAssignment<T::AllowedRoles>],
+    ) -> ListResultVec<RoleAssignment>
+    where
+        T: authz::ApiResourceWithRolesType + Clone,
+    {
+        // TODO-security We should carefully review what permissions are
+        // required for modifying the policy of a resource.
+        opctx.authorize(authz::Action::ModifyPolicy, authz_resource).await?;
+        bail_unless!(
+            new_assignments.len() <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE
+        );
+
+        let resource_type = authz_resource.resource_type();
+        let resource_id = authz_resource.resource_id();
+
+        // Sort the records in the same order that we would return them when
+        // listing them. This is because we're going to use RETURNING to return
+        // the inserted rows from the database and we want them to come back in
+        // the same order that we would normally list them.
+        let mut new_assignments = new_assignments
+            .iter()
+            .map(|r| {
+                db::model::RoleAssignment::new(
+                    db::model::IdentityType::from(r.identity_type),
+                    r.identity_id,
+                    resource_type,
+                    resource_id,
+                    &r.role_name.to_database_string(),
+                )
+            })
+            .collect::<Vec<_>>();
+        new_assignments.sort_by(|r1, r2| {
+            (&r1.role_name, r1.identity_id)
+                .cmp(&(&r2.role_name, r2.identity_id))
+        });
+
+        use db::schema::role_assignment::dsl;
+        let delete_old_query = diesel::delete(dsl::role_assignment)
+            .filter(dsl::resource_id.eq(resource_id))
+            .filter(dsl::resource_type.eq(resource_type.to_string()))
+            .filter(dsl::identity_type.ne(IdentityType::UserBuiltin));
+        let insert_new_query = diesel::insert_into(dsl::role_assignment)
+            .values(new_assignments)
+            .returning(RoleAssignment::as_returning());
+
+        // TODO-scalability: Ideally this would be a batched transaction so we
+        // don't need to hold a transaction open across multiple roundtrips from
+        // the database, but for now we're using a transaction due to the
+        // severely decreased legibility of CTEs via diesel right now.
+        // We might instead want to first-class the idea of Policies in the
+        // database so that we can build up a whole new Policy in batches and
+        // then flip the resource over to using it.
+        self.pool_authorized(opctx)
+            .await?
+            .transaction(move |conn| {
+                delete_old_query.execute(conn)?;
+                Ok(insert_new_query.get_results(conn)?)
+            })
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+}
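The function above sorts new assignments by (role_name, identity_id) so rows handed back via RETURNING match listing order. That sort key in isolation, over simplified (role_name, identity_id) pairs:

```rust
// The sort key in isolation: (role_name, identity_id), ascending, matching
// the ORDER BY used when listing assignments.
fn sort_for_listing(assignments: &mut Vec<(String, u32)>) {
    assignments.sort_by(|r1, r2| (&r1.0, r1.1).cmp(&(&r2.0, r2.1)));
}

fn main() {
    let mut v = vec![
        ("viewer".to_string(), 2),
        ("admin".to_string(), 9),
        ("viewer".to_string(), 1),
    ];
    sort_for_listing(&mut v);
    // "admin" sorts first; ties on role_name break by identity_id.
    assert_eq!(v[0], ("admin".to_string(), 9));
    assert_eq!(v[1], ("viewer".to_string(), 1));
}
```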
diff --git a/nexus/src/db/datastore/saga.rs b/nexus/src/db/datastore/saga.rs
new file mode 100644
index 00000000000..8427735bd46
--- /dev/null
+++ b/nexus/src/db/datastore/saga.rs
@@ -0,0 +1,160 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Saga`]s.
+
+use super::DataStore;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::model::Generation;
+use crate::db::pagination::paginated;
+use crate::db::update_and_check::UpdateAndCheck;
+use crate::db::update_and_check::UpdateStatus;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    pub async fn saga_create(
+        &self,
+        saga: &db::saga_types::Saga,
+    ) -> Result<(), Error> {
+        use db::schema::saga::dsl;
+
+        let name = saga.template_name.clone();
+        diesel::insert_into(dsl::saga)
+            .values(saga.clone())
+            .execute_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::SagaDbg, &name),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn saga_create_event(
+        &self,
+        event: &db::saga_types::SagaNodeEvent,
+    ) -> Result<(), Error> {
+        use db::schema::saga_node_event::dsl;
+
+        // TODO-robustness This INSERT ought to be conditional on this SEC still
+        // owning this saga.
+        diesel::insert_into(dsl::saga_node_event)
+            .values(event.clone())
+            .execute_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::SagaDbg, "Saga Event"),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn saga_update_state(
+        &self,
+        saga_id: steno::SagaId,
+        new_state: steno::SagaCachedState,
+        current_sec: db::saga_types::SecId,
+        current_adopt_generation: Generation,
+    ) -> Result<(), Error> {
+        use db::schema::saga::dsl;
+
+        let saga_id: db::saga_types::SagaId = saga_id.into();
+        let result = diesel::update(dsl::saga)
+            .filter(dsl::id.eq(saga_id))
+            .filter(dsl::current_sec.eq(current_sec))
+            .filter(dsl::adopt_generation.eq(current_adopt_generation))
+            .set(dsl::saga_state.eq(db::saga_types::SagaCachedState(new_state)))
+            .check_if_exists::<db::saga_types::Saga>(saga_id)
+            .execute_and_check(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::SagaDbg,
+                        LookupType::ById(saga_id.0.into()),
+                    ),
+                )
+            })?;
+
+        match result.status {
+            UpdateStatus::Updated => Ok(()),
+            UpdateStatus::NotUpdatedButExists => Err(Error::InvalidRequest {
+                message: format!(
+                    "failed to update saga {:?} with state {:?}: preconditions not met: \
+                     expected current_sec = {:?}, adopt_generation = {:?}, \
+                     but found current_sec = {:?}, adopt_generation = {:?}, state = {:?}",
+                    saga_id,
+                    new_state,
+                    current_sec,
+                    current_adopt_generation,
+                    result.found.current_sec,
+                    result.found.adopt_generation,
+                    result.found.saga_state,
+                )
+            }),
+        }
+    }
+
+    pub async fn saga_list_unfinished_by_id(
+        &self,
+        sec_id: &db::SecId,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<db::saga_types::Saga> {
+        use db::schema::saga::dsl;
+        paginated(dsl::saga, dsl::id, &pagparams)
+            .filter(dsl::saga_state.ne(db::saga_types::SagaCachedState(
+                steno::SagaCachedState::Done,
+            )))
+            .filter(dsl::current_sec.eq(*sec_id))
+            .load_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::SagaDbg,
+                        LookupType::ById(sec_id.0),
+                    ),
+                )
+            })
+    }
+
+    pub async fn saga_node_event_list_by_id(
+        &self,
+        id: db::saga_types::SagaId,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<steno::SagaNodeEvent> {
+        use db::schema::saga_node_event::dsl;
+        paginated(dsl::saga_node_event, dsl::saga_id, &pagparams)
+            .filter(dsl::saga_id.eq(id))
+            .load_async::<db::saga_types::SagaNodeEvent>(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::SagaDbg,
+                        LookupType::ById(id.0 .0),
+                    ),
+                )
+            })?
+            .into_iter()
+            .map(|db_event| steno::SagaNodeEvent::try_from(db_event))
+            .collect::<Result<Vec<_>, Error>>()
+    }
+}
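`saga_update_state` above is effectively a compare-and-swap: the new state lands only if the caller's view of (current_sec, adopt_generation) still matches the row. A toy model of that precondition check, with simplified stand-in types rather than the real saga machinery:

```rust
// Toy compare-and-swap mirroring the preconditions of `saga_update_state`.
struct SagaRow {
    state: &'static str,
    current_sec: u32,
    adopt_generation: u64,
}

fn update_state(
    row: &mut SagaRow,
    new_state: &'static str,
    expected_sec: u32,
    expected_generation: u64,
) -> Result<(), String> {
    if row.current_sec != expected_sec
        || row.adopt_generation != expected_generation
    {
        // Mirrors UpdateStatus::NotUpdatedButExists: the row is there, but
        // the caller's preconditions no longer hold.
        return Err(format!(
            "preconditions not met: found sec = {}, generation = {}",
            row.current_sec, row.adopt_generation
        ));
    }
    row.state = new_state;
    Ok(())
}

fn main() {
    let mut row =
        SagaRow { state: "running", current_sec: 7, adopt_generation: 1 };
    assert!(update_state(&mut row, "done", 7, 1).is_ok());
    assert_eq!(row.state, "done");
    // A caller holding a stale adopt generation loses the race.
    assert!(update_state(&mut row, "running", 7, 99).is_err());
}
```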
diff --git a/nexus/src/db/datastore/service.rs b/nexus/src/db/datastore/service.rs
new file mode 100644
index 00000000000..b2665b48cdd
--- /dev/null
+++ b/nexus/src/db/datastore/service.rs
@@ -0,0 +1,66 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Service`]s.
+
+use super::DataStore;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::collection_insert::AsyncInsertError;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Asset;
+use crate::db::model::Service;
+use crate::db::model::Sled;
+use chrono::Utc;
+use diesel::prelude::*;
+use diesel::upsert::excluded;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+
+impl DataStore {
+    /// Stores a new service in the database.
+    pub async fn service_upsert(
+        &self,
+        opctx: &OpContext,
+        service: Service,
+    ) -> CreateResult<Service> {
+        use db::schema::service::dsl;
+
+        let sled_id = service.sled_id;
+        Sled::insert_resource(
+            sled_id,
+            diesel::insert_into(dsl::service)
+                .values(service.clone())
+                .on_conflict(dsl::id)
+                .do_update()
+                .set((
+                    dsl::time_modified.eq(Utc::now()),
+                    dsl::sled_id.eq(excluded(dsl::sled_id)),
+                    dsl::ip.eq(excluded(dsl::ip)),
+                    dsl::kind.eq(excluded(dsl::kind)),
+                )),
+        )
+        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| match e {
+            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
+                type_name: ResourceType::Sled,
+                lookup_type: LookupType::ById(sled_id),
+            },
+            AsyncInsertError::DatabaseError(e) => {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Service,
+                        &service.id().to_string(),
+                    ),
+                )
+            }
+        })
+    }
+}
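`service_upsert` above relies on ON CONFLICT (id) DO UPDATE with `excluded` values: insert the row, or overwrite the mutable columns of an existing row with the incoming ones. A sketch of those semantics over a plain HashMap, with stand-in types:

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

struct ServiceRow {
    sled_id: u32,
    ip: String,
    kind: String,
    time_modified: u64,
}

fn service_upsert(table: &mut HashMap<u32, ServiceRow>, id: u32, incoming: ServiceRow) {
    match table.entry(id) {
        // ON CONFLICT (id) DO UPDATE SET col = excluded.col, ...
        Entry::Occupied(mut existing) => {
            let row = existing.get_mut();
            row.sled_id = incoming.sled_id;
            row.ip = incoming.ip;
            row.kind = incoming.kind;
            row.time_modified = incoming.time_modified;
        }
        // No existing row: a plain INSERT.
        Entry::Vacant(slot) => {
            slot.insert(incoming);
        }
    }
}

fn main() {
    let mut table = HashMap::new();
    let v1 = ServiceRow { sled_id: 1, ip: "10.0.0.1".into(), kind: "nexus".into(), time_modified: 1 };
    service_upsert(&mut table, 42, v1);
    let v2 = ServiceRow { sled_id: 2, ip: "10.0.0.2".into(), kind: "nexus".into(), time_modified: 2 };
    service_upsert(&mut table, 42, v2);
    assert_eq!(table[&42].sled_id, 2); // the second write updated in place
}
```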
diff --git a/nexus/src/db/datastore/silo.rs b/nexus/src/db/datastore/silo.rs
new file mode 100644
index 00000000000..8c906e38c0a
--- /dev/null
+++ b/nexus/src/db/datastore/silo.rs
@@ -0,0 +1,231 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to [`Silo`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::diesel_pool_result_optional;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::fixed_data::silo::DEFAULT_SILO;
+use crate::db::identity::Resource;
+use crate::db::model::Name;
+use crate::db::model::Silo;
+use crate::db::pagination::paginated;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Load built-in silos into the database
+    pub async fn load_builtin_silos(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;
+
+        debug!(opctx.log, "attempting to create built-in silo");
+
+        use db::schema::silo::dsl;
+        let count = diesel::insert_into(dsl::silo)
+            .values(&*DEFAULT_SILO)
+            .on_conflict(dsl::id)
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        info!(opctx.log, "created {} built-in silos", count);
+        Ok(())
+    }
+
+    pub async fn silo_create(
+        &self,
+        opctx: &OpContext,
+        silo: Silo,
+    ) -> CreateResult<Silo> {
+        opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?;
+
+        let silo_id = silo.id();
+
+        use db::schema::silo::dsl;
+        diesel::insert_into(dsl::silo)
+            .values(silo)
+            .returning(Silo::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Silo,
+                        silo_id.to_string().as_str(),
+                    ),
+                )
+            })
+    }
+
+    pub async fn silos_list_by_id(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<Silo> {
+        opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
+
+        use db::schema::silo::dsl;
+        paginated(dsl::silo, dsl::id, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::discoverable.eq(true))
+            .select(Silo::as_select())
+            .load_async::<Silo>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn silos_list_by_name(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<Silo> {
+        opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
+
+        use db::schema::silo::dsl;
+        paginated(dsl::silo, dsl::name, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::discoverable.eq(true))
+            .select(Silo::as_select())
+            .load_async::<Silo>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn silo_delete(
+        &self,
+        opctx: &OpContext,
+        authz_silo: &authz::Silo,
+        db_silo: &db::model::Silo,
+    ) -> DeleteResult {
+        assert_eq!(authz_silo.id(), db_silo.id());
+        opctx.authorize(authz::Action::Delete, authz_silo).await?;
+
+        use db::schema::organization;
+        use db::schema::silo;
+        use db::schema::silo_user;
+
+        // Make sure there are no organizations present within this silo.
+        let id = authz_silo.id();
+        let rcgen = db_silo.rcgen;
+        let org_found = diesel_pool_result_optional(
+            organization::dsl::organization
+                .filter(organization::dsl::silo_id.eq(id))
+                .filter(organization::dsl::time_deleted.is_null())
+                .select(organization::dsl::id)
+                .limit(1)
+                .first_async::<Uuid>(self.pool_authorized(opctx).await?)
+                .await,
+        )
+        .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
+
+        if org_found.is_some() {
+            return Err(Error::InvalidRequest {
+                message: "silo to be deleted contains an organization"
+                    .to_string(),
+            });
+        }
+
+        let now = Utc::now();
+        let updated_rows = diesel::update(silo::dsl::silo)
+            .filter(silo::dsl::time_deleted.is_null())
+            .filter(silo::dsl::id.eq(id))
+            .filter(silo::dsl::rcgen.eq(rcgen))
+            .set(silo::dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_silo),
+                )
+            })?;
+
+        if updated_rows == 0 {
+            return Err(Error::InvalidRequest {
+                message: "silo deletion failed due to concurrent modification"
+                    .to_string(),
+            });
+        }
+
+        info!(opctx.log, "deleted silo {}", id);
+
+        // If silo deletion succeeded, delete all silo users
+        // TODO-correctness This needs to happen in a saga or some other
+        // mechanism that ensures it happens even if we crash at this point.
+        // TODO-scalability This needs to happen in batches
+        let updated_rows = diesel::update(silo_user::dsl::silo_user)
+            .filter(silo_user::dsl::silo_id.eq(id))
+            .filter(silo_user::dsl::time_deleted.is_null())
+            .set(silo_user::dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_silo),
+                )
+            })?;
+
+        info!(opctx.log, "deleted {} silo users for silo {}", updated_rows, id);
+
+        // delete all silo identity providers
+        use db::schema::identity_provider::dsl as idp_dsl;
+
+        let updated_rows = diesel::update(idp_dsl::identity_provider)
+            .filter(idp_dsl::silo_id.eq(id))
+            .filter(idp_dsl::time_deleted.is_null())
+            .set(idp_dsl::time_deleted.eq(Utc::now()))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_silo),
+                )
+            })?;
+
+        info!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id);
+
+        use db::schema::saml_identity_provider::dsl as saml_idp_dsl;
+
+        let updated_rows = diesel::update(saml_idp_dsl::saml_identity_provider)
+            .filter(saml_idp_dsl::silo_id.eq(id))
+            .filter(saml_idp_dsl::time_deleted.is_null())
+            .set(saml_idp_dsl::time_deleted.eq(Utc::now()))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_silo),
+                )
+            })?;
+
+        info!(
+            opctx.log,
+            "deleted {} silo saml IdPs for silo {}", updated_rows, id
+        );
+
+        Ok(())
+    }
+}
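`silo_delete` above re-checks the silo's child-resource generation (`rcgen`) at delete time, so an organization created between the emptiness check and the update causes the delete to fail rather than orphan a child. A toy version of that guard:

```rust
// Toy version of the rcgen-guarded soft delete.
struct SiloRow {
    rcgen: u64,
    time_deleted: Option<u64>,
}

fn silo_soft_delete(
    row: &mut SiloRow,
    expected_rcgen: u64,
    now: u64,
) -> Result<(), &'static str> {
    if row.time_deleted.is_some() || row.rcgen != expected_rcgen {
        // A bumped rcgen means a child was added concurrently.
        return Err("silo deletion failed due to concurrent modification");
    }
    row.time_deleted = Some(now);
    Ok(())
}

fn main() {
    let mut silo = SiloRow { rcgen: 4, time_deleted: None };
    // Concurrent organization creation bumped rcgen to 5: delete is refused.
    silo.rcgen = 5;
    assert!(silo_soft_delete(&mut silo, 4, 100).is_err());
    assert!(silo_soft_delete(&mut silo, 5, 100).is_ok());
}
```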
diff --git a/nexus/src/db/datastore/silo_user.rs b/nexus/src/db/datastore/silo_user.rs
new file mode 100644
index 00000000000..aad72987124
--- /dev/null
+++ b/nexus/src/db/datastore/silo_user.rs
@@ -0,0 +1,219 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to [`SiloUser`]s.
+
+use super::DataStore;
+use crate::authn;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::datastore::IdentityMetadataCreateParams;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::model::Name;
+use crate::db::model::SiloUser;
+use crate::db::model::UserBuiltin;
+use crate::db::pagination::paginated;
+use crate::external_api::params;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Create a silo user
+    pub async fn silo_user_create(
+        &self,
+        silo_user: SiloUser,
+    ) -> Result<SiloUser, Error> {
+        use db::schema::silo_user::dsl;
+
+        let silo_user_external_id = silo_user.external_id.clone();
+        diesel::insert_into(dsl::silo_user)
+            .values(silo_user)
+            .returning(SiloUser::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::SiloUser,
+                        &silo_user_external_id,
+                    ),
+                )
+            })
+    }
+
+    /// Given an external ID, return
+    /// - Ok(Some(SiloUser)) if that external id refers to an existing silo user
+    /// - Ok(None) if it does not
+    /// - Err(...) if there was an error doing this lookup.
+    pub async fn silo_user_fetch_by_external_id(
+        &self,
+        opctx: &OpContext,
+        authz_silo: &authz::Silo,
+        external_id: &str,
+    ) -> Result<Option<SiloUser>, Error> {
+        opctx.authorize(authz::Action::ListChildren, authz_silo).await?;
+
+        use db::schema::silo_user::dsl;
+
+        Ok(dsl::silo_user
+            .filter(dsl::silo_id.eq(authz_silo.id()))
+            .filter(dsl::external_id.eq(external_id.to_string()))
+            .filter(dsl::time_deleted.is_null())
+            .select(SiloUser::as_select())
+            .load_async::<SiloUser>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::SiloUser,
+                        LookupType::ByName(external_id.to_string()),
+                    ),
+                )
+            })?
+            .pop())
+    }
+
+    pub async fn silo_users_list_by_id(
+        &self,
+        opctx: &OpContext,
+        authz_silo: &authz::Silo,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<SiloUser> {
+        use db::schema::silo_user::dsl;
+
+        opctx.authorize(authz::Action::Read, authz_silo).await?;
+        paginated(dsl::silo_user, dsl::id, pagparams)
+            .filter(dsl::silo_id.eq(authz_silo.id()))
+            .filter(dsl::time_deleted.is_null())
+            .select(SiloUser::as_select())
+            .load_async::<SiloUser>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn users_builtin_list_by_name(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<UserBuiltin> {
+        use db::schema::user_builtin::dsl;
+        opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
+        paginated(dsl::user_builtin, dsl::name, pagparams)
+            .select(UserBuiltin::as_select())
+            .load_async::<UserBuiltin>(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Load built-in users into the database
+    pub async fn load_builtin_users(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        use db::schema::user_builtin::dsl;
+
+        opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;
+
+        let builtin_users = [
+            // Note: "db_init" is also a builtin user, but that one by necessity
+            // is created with the database.
+            &*authn::USER_SERVICE_BALANCER,
+            &*authn::USER_INTERNAL_API,
+            &*authn::USER_INTERNAL_READ,
+            &*authn::USER_EXTERNAL_AUTHN,
+            &*authn::USER_SAGA_RECOVERY,
+        ]
+        .iter()
+        .map(|u| {
+            UserBuiltin::new(
+                u.id,
+                params::UserBuiltinCreate {
+                    identity: IdentityMetadataCreateParams {
+                        name: u.name.clone(),
+                        description: String::from(u.description),
+                    },
+                },
+            )
+        })
+        .collect::<Vec<UserBuiltin>>();
+
+        debug!(opctx.log, "attempting to create built-in users");
+        let count = diesel::insert_into(dsl::user_builtin)
+            .values(builtin_users)
+            .on_conflict(dsl::id)
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        info!(opctx.log, "created {} built-in users", count);
+
+        Ok(())
+    }
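The seeding functions above insert fixed data with ON CONFLICT ... DO NOTHING, so startup can run repeatedly and the logged count covers only newly created rows. The same idempotent-seeding idea over a HashSet:

```rust
use std::collections::HashSet;

// Idempotent seeding: "insert, on conflict do nothing", counting new rows.
fn seed<T: Eq + std::hash::Hash>(table: &mut HashSet<T>, values: Vec<T>) -> usize {
    let mut inserted = 0;
    for v in values {
        if table.insert(v) {
            inserted += 1; // newly created row
        }
        // Conflict: do nothing, as with `.on_conflict(...).do_nothing()`.
    }
    inserted
}

fn main() {
    let mut t = HashSet::new();
    assert_eq!(seed(&mut t, vec!["internal-api", "saga-recovery"]), 2);
    // A second startup pass is a no-op and reports zero creations.
    assert_eq!(seed(&mut t, vec!["internal-api", "saga-recovery"]), 0);
}
```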
+
+    /// Load the testing users into the database
+    pub async fn load_silo_users(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        use db::schema::silo_user::dsl;
+
+        opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;
+
+        let users =
+            [&*authn::USER_TEST_PRIVILEGED, &*authn::USER_TEST_UNPRIVILEGED];
+
+        debug!(opctx.log, "attempting to create silo users");
+        let count = diesel::insert_into(dsl::silo_user)
+            .values(users)
+            .on_conflict(dsl::id)
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        info!(opctx.log, "created {} silo users", count);
+
+        Ok(())
+    }
+
+    /// Load role assignments for the test users into the database
+    pub async fn load_silo_user_role_assignments(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<(), Error> {
+        use db::schema::role_assignment::dsl;
+        debug!(opctx.log, "attempting to create silo user role assignments");
+        let count = diesel::insert_into(dsl::role_assignment)
+            .values(&*db::fixed_data::silo_user::ROLE_ASSIGNMENTS_PRIVILEGED)
+            .on_conflict((
+                dsl::identity_type,
+                dsl::identity_id,
+                dsl::resource_type,
+                dsl::resource_id,
+                dsl::role_name,
+            ))
+            .do_nothing()
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(e, ErrorHandler::Server)
+            })?;
+        info!(opctx.log, "created {} silo user role assignments", count);
+
+        Ok(())
+    }
+}
diff --git a/nexus/src/db/datastore/sled.rs b/nexus/src/db/datastore/sled.rs
new file mode 100644
index 00000000000..3f97bfcc540
--- /dev/null
+++ b/nexus/src/db/datastore/sled.rs
@@ -0,0 +1,65 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Sled`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Asset;
+use crate::db::model::Sled;
+use crate::db::pagination::paginated;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    /// Stores a new sled in the database.
+    pub async fn sled_upsert(&self, sled: Sled) -> CreateResult<Sled> {
+        use db::schema::sled::dsl;
+        diesel::insert_into(dsl::sled)
+            .values(sled.clone())
+            .on_conflict(dsl::id)
+            .do_update()
+            .set((
+                dsl::time_modified.eq(Utc::now()),
+                dsl::ip.eq(sled.ip),
+                dsl::port.eq(sled.port),
+            ))
+            .returning(Sled::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Sled,
+                        &sled.id().to_string(),
+                    ),
+                )
+            })
+    }
+
+    pub async fn sled_list(
+        &self,
+        opctx: &OpContext,
+        pagparams: &DataPageParams<'_, Uuid>,
+    ) -> ListResultVec<Sled> {
+        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
+        use db::schema::sled::dsl;
+        paginated(dsl::sled, dsl::id, pagparams)
+            .select(Sled::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+}
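`sled_list` pages by id via `paginated(...)`, i.e. keyset pagination: each page is the first `limit` rows whose id sorts after the last marker seen, which stays cheap no matter how deep into the listing a client is. In miniature:

```rust
// Keyset pagination in miniature, mirroring `paginated(dsl::sled, dsl::id, ..)`.
fn page_after(ids: &[u32], last_seen: Option<u32>, limit: usize) -> Vec<u32> {
    let mut sorted = ids.to_vec();
    sorted.sort_unstable();
    sorted
        .into_iter()
        // WHERE id > marker (no marker on the first page) ...
        .filter(|id| last_seen.map_or(true, |marker| *id > marker))
        // ... LIMIT n
        .take(limit)
        .collect()
}

fn main() {
    let ids = [5, 1, 9, 3, 7];
    assert_eq!(page_after(&ids, None, 2), vec![1, 3]);
    assert_eq!(page_after(&ids, Some(3), 2), vec![5, 7]);
}
```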
diff --git a/nexus/src/db/datastore/ssh_key.rs b/nexus/src/db/datastore/ssh_key.rs
new file mode 100644
index 00000000000..5c9b247f8fc
--- /dev/null
+++ b/nexus/src/db/datastore/ssh_key.rs
@@ -0,0 +1,95 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to [`SshKey`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Resource;
+use crate::db::model::Name;
+use crate::db::model::SshKey;
+use crate::db::pagination::paginated;
+use crate::db::update_and_check::UpdateAndCheck;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::ResourceType;
+
+impl DataStore {
+    pub async fn ssh_keys_list(
+        &self,
+        opctx: &OpContext,
+        authz_user: &authz::SiloUser,
+        page_params: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<SshKey> {
+        opctx.authorize(authz::Action::ListChildren, authz_user).await?;
+
+        use db::schema::ssh_key::dsl;
+        paginated(dsl::ssh_key, dsl::name, page_params)
+            .filter(dsl::silo_user_id.eq(authz_user.id()))
+            .filter(dsl::time_deleted.is_null())
+            .select(SshKey::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Create a new SSH public key for a user.
+    pub async fn ssh_key_create(
+        &self,
+        opctx: &OpContext,
+        authz_user: &authz::SiloUser,
+        ssh_key: SshKey,
+    ) -> CreateResult<SshKey> {
+        assert_eq!(authz_user.id(), ssh_key.silo_user_id);
+        opctx.authorize(authz::Action::CreateChild, authz_user).await?;
+        let name = ssh_key.name().to_string();
+
+        use db::schema::ssh_key::dsl;
+        diesel::insert_into(dsl::ssh_key)
+            .values(ssh_key)
+            .returning(SshKey::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::SshKey, &name),
+                )
+            })
+    }
+
+    /// Delete an existing SSH public key.
+    pub async fn ssh_key_delete(
+        &self,
+        opctx: &OpContext,
+        authz_ssh_key: &authz::SshKey,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_ssh_key).await?;
+
+        use db::schema::ssh_key::dsl;
+        diesel::update(dsl::ssh_key)
+            .filter(dsl::id.eq(authz_ssh_key.id()))
+            .filter(dsl::time_deleted.is_null())
+            .set(dsl::time_deleted.eq(Utc::now()))
+            .check_if_exists::<SshKey>(authz_ssh_key.id())
+            .execute_and_check(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_ssh_key),
+                )
+            })?;
+        Ok(())
+    }
+}
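`ssh_key_delete` pairs a conditional soft delete with `check_if_exists`, which distinguishes "deleted now", "row exists but the precondition failed", and "no such row". A toy model of that tri-state outcome:

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum DeleteOutcome {
    Deleted,
    NotUpdatedButExists, // precondition (time_deleted IS NULL) failed
    NotFound,
}

struct KeyRow {
    time_deleted: Option<u64>,
}

fn soft_delete(rows: &mut HashMap<u32, KeyRow>, id: u32, now: u64) -> DeleteOutcome {
    match rows.get_mut(&id) {
        None => DeleteOutcome::NotFound,
        Some(row) if row.time_deleted.is_some() => {
            DeleteOutcome::NotUpdatedButExists
        }
        Some(row) => {
            row.time_deleted = Some(now);
            DeleteOutcome::Deleted
        }
    }
}

fn main() {
    let mut rows = HashMap::from([(1, KeyRow { time_deleted: None })]);
    assert_eq!(soft_delete(&mut rows, 1, 100), DeleteOutcome::Deleted);
    assert_eq!(soft_delete(&mut rows, 1, 200), DeleteOutcome::NotUpdatedButExists);
    assert_eq!(soft_delete(&mut rows, 2, 300), DeleteOutcome::NotFound);
}
```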
diff --git a/nexus/src/db/datastore/update.rs b/nexus/src/db/datastore/update.rs
new file mode 100644
index 00000000000..b0113db8a97
--- /dev/null
+++ b/nexus/src/db/datastore/update.rs
@@ -0,0 +1,59 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods related to updates and artifacts.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::model::UpdateAvailableArtifact;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::InternalContext;
+
+impl DataStore {
+    pub async fn update_available_artifact_upsert(
+        &self,
+        opctx: &OpContext,
+        artifact: UpdateAvailableArtifact,
+    ) -> CreateResult<UpdateAvailableArtifact> {
+        opctx.authorize(authz::Action::Modify, &authz::FLEET).await?;
+
+        use db::schema::update_available_artifact::dsl;
+        diesel::insert_into(dsl::update_available_artifact)
+            .values(artifact.clone())
+            .on_conflict((dsl::name, dsl::version, dsl::kind))
+            .do_update()
+            .set(artifact.clone())
+            .returning(UpdateAvailableArtifact::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn update_available_artifact_hard_delete_outdated(
+        &self,
+        opctx: &OpContext,
+        current_targets_role_version: i64,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Modify, &authz::FLEET).await?;
+
+        // We use the `targets_role_version` column in the table to delete any
+        // old rows, keeping the table in sync with the current copy of
+        // artifacts.json.
+        use db::schema::update_available_artifact::dsl;
+        diesel::delete(dsl::update_available_artifact)
+            .filter(dsl::targets_role_version.lt(current_targets_role_version))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map(|_rows_deleted| ())
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+            .internal_context("deleting outdated available artifacts")
+    }
+}
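The delete above keeps the artifact table in sync with artifacts.json by dropping every row whose `targets_role_version` is older than the current one. The same rule over an in-memory list:

```rust
// Keep only rows stamped with the current targets-role version or newer.
fn prune_outdated(rows: &mut Vec<(String, i64)>, current_targets_role_version: i64) {
    // (artifact name, targets_role_version)
    rows.retain(|(_, v)| *v >= current_targets_role_version);
}

fn main() {
    let mut rows = vec![("a".into(), 3), ("b".into(), 4), ("c".into(), 5)];
    prune_outdated(&mut rows, 5);
    assert_eq!(rows, vec![("c".to_string(), 5)]);
}
```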
diff --git a/nexus/src/db/datastore/volume.rs b/nexus/src/db/datastore/volume.rs
new file mode 100644
index 00000000000..1d20dd1cdf7
--- /dev/null
+++ b/nexus/src/db/datastore/volume.rs
@@ -0,0 +1,78 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Volume`]s.
+
+use super::DataStore;
+use crate::db;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Asset;
+use crate::db::model::Volume;
+use crate::db::update_and_check::UpdateAndCheck;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::LookupResult;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use uuid::Uuid;
+
+impl DataStore {
+    pub async fn volume_create(&self, volume: Volume) -> CreateResult<Volume> {
+        use db::schema::volume::dsl;
+
+        diesel::insert_into(dsl::volume)
+            .values(volume.clone())
+            .on_conflict(dsl::id)
+            .do_nothing()
+            .returning(Volume::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Volume,
+                        volume.id().to_string().as_str(),
+                    ),
+                )
+            })
+    }
+
+    pub async fn volume_delete(&self, volume_id: Uuid) -> DeleteResult {
+        use db::schema::volume::dsl;
+
+        let now = Utc::now();
+        diesel::update(dsl::volume)
+            .filter(dsl::id.eq(volume_id))
+            .set(dsl::time_deleted.eq(now))
+            .check_if_exists::<Volume>(volume_id)
+            .execute_and_check(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::Volume,
+                        LookupType::ById(volume_id),
+                    ),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn volume_get(&self, volume_id: Uuid) -> LookupResult<Volume> {
+        use db::schema::volume::dsl;
+
+        dsl::volume
+            .filter(dsl::id.eq(volume_id))
+            .select(Volume::as_select())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+}
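`volume_create` above inserts with ON CONFLICT (id) DO NOTHING, so the first writer wins and a repeated id does not clobber the stored row. A sketch of that first-writer-wins idea over a HashMap; note that, unlike the real query (which surfaces a conflict through RETURNING), this toy simply hands back the stored row either way:

```rust
use std::collections::HashMap;

// Insert-if-absent keyed on id: ON CONFLICT (id) DO NOTHING in miniature.
fn volume_create(table: &mut HashMap<u32, String>, id: u32, data: String) -> String {
    table.entry(id).or_insert(data).clone()
}

fn main() {
    let mut table = HashMap::new();
    assert_eq!(volume_create(&mut table, 1, "v1".into()), "v1");
    // Re-running the create with the same id leaves the stored row untouched.
    assert_eq!(volume_create(&mut table, 1, "v2".into()), "v1");
}
```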
diff --git a/nexus/src/db/datastore/vpc.rs b/nexus/src/db/datastore/vpc.rs
new file mode 100644
index 00000000000..fb570815ae4
--- /dev/null
+++ b/nexus/src/db/datastore/vpc.rs
@@ -0,0 +1,605 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Vpc`]s.
+
+use super::DataStore;
+use crate::authz;
+use crate::context::OpContext;
+use crate::db;
+use crate::db::collection_insert::AsyncInsertError;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::collection_insert::SyncInsertError;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::error::TransactionError;
+use crate::db::identity::Resource;
+use crate::db::model::IncompleteVpc;
+use crate::db::model::Name;
+use crate::db::model::NetworkInterface;
+use crate::db::model::RouterRoute;
+use crate::db::model::RouterRouteUpdate;
+use crate::db::model::Vpc;
+use crate::db::model::VpcFirewallRule;
+use crate::db::model::VpcRouter;
+use crate::db::model::VpcRouterUpdate;
+use crate::db::model::VpcSubnet;
+use crate::db::model::VpcSubnetUpdate;
+use crate::db::model::VpcUpdate;
+use crate::db::pagination::paginated;
+use crate::db::queries::vpc::InsertVpcQuery;
+use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery;
+use crate::db::queries::vpc_subnet::SubnetError;
+use async_bb8_diesel::AsyncConnection;
+use async_bb8_diesel::AsyncRunQueryDsl;
+use chrono::Utc;
+use diesel::prelude::*;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DataPageParams;
+use omicron_common::api::external::DeleteResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+use omicron_common::api::external::UpdateResult;
+
+impl DataStore {
+    pub async fn project_list_vpcs(
+        &self,
+        opctx: &OpContext,
+        authz_project: &authz::Project,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<Vpc> {
+        opctx.authorize(authz::Action::ListChildren, authz_project).await?;
+
+        use db::schema::vpc::dsl;
+        paginated(dsl::vpc, dsl::name, &pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::project_id.eq(authz_project.id()))
+            .select(Vpc::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn project_create_vpc(
+        &self,
+        opctx: &OpContext,
+        authz_project: &authz::Project,
+        vpc: IncompleteVpc,
+    ) -> Result<(authz::Vpc, Vpc), Error> {
+        use db::schema::vpc::dsl;
+
+        assert_eq!(authz_project.id(), vpc.project_id);
+        opctx.authorize(authz::Action::CreateChild, authz_project).await?;
+
+        // TODO-correctness Shouldn't this use "insert_resource"?
+        //
+        // Note that to do so requires adding an `rcgen` column to the project
+        // table.
+        let name = vpc.identity.name.clone();
+        let query = InsertVpcQuery::new(vpc);
+        let vpc = diesel::insert_into(dsl::vpc)
+            .values(query)
+            .returning(Vpc::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(ResourceType::Vpc, name.as_str()),
+                )
+            })?;
+        Ok((
+            authz::Vpc::new(
+                authz_project.clone(),
+                vpc.id(),
+                LookupType::ByName(vpc.name().to_string()),
+            ),
+            vpc,
+        ))
+    }
+
+    pub async fn project_update_vpc(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+        updates: VpcUpdate,
+    ) -> UpdateResult<Vpc> {
+        opctx.authorize(authz::Action::Modify, authz_vpc).await?;
+
+        use db::schema::vpc::dsl;
+        diesel::update(dsl::vpc)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_vpc.id()))
+            .set(updates)
+            .returning(Vpc::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_vpc),
+                )
+            })
+    }
+
+    pub async fn project_delete_vpc(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_vpc).await?;
+
+        use db::schema::vpc::dsl;
+
+        // Note that we don't ensure the firewall rules are empty here, because
+        // we allow deleting VPCs with firewall rules present. Inserting new
+        // rules is serialized with respect to the deletion by the row lock
+        // associated with the VPC row, since we use the collection insert CTE
+        // pattern to add firewall rules.
+
+        let now = Utc::now();
+        diesel::update(dsl::vpc)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_vpc.id()))
+            .set(dsl::time_deleted.eq(now))
+            .returning(Vpc::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_vpc),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn vpc_list_firewall_rules(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+    ) -> ListResultVec<VpcFirewallRule> {
+        // Firewall rules are modeled in the API as a single resource under the
+        // Vpc (rather than individual child resources with their own CRUD
+        // endpoints). You cannot look them up individually, create them,
+        // remove them, or update them. You can only modify the whole set. So
+        // for authz, we treat them as part of the Vpc itself.
+        opctx.authorize(authz::Action::Read, authz_vpc).await?;
+        use db::schema::vpc_firewall_rule::dsl;
+
+        dsl::vpc_firewall_rule
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::vpc_id.eq(authz_vpc.id()))
+            .order(dsl::name.asc())
+            .select(VpcFirewallRule::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn vpc_delete_all_firewall_rules(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Modify, authz_vpc).await?;
+        use db::schema::vpc_firewall_rule::dsl;
+
+        let now = Utc::now();
+        // TODO-performance: Paginate this update to avoid long queries
+        diesel::update(dsl::vpc_firewall_rule)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::vpc_id.eq(authz_vpc.id()))
+            .set(dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_vpc),
+                )
+            })?;
+        Ok(())
+    }
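As the comment in `vpc_list_firewall_rules` explains, firewall rules are one resource modified as a whole set. A toy replace-the-set operation that installs the incoming rules sorted by name, mirroring the listing order used above:

```rust
// Replace-the-whole-set semantics for firewall rules, in miniature.
fn replace_rule_set(current: &mut Vec<String>, mut incoming: Vec<String>) -> Vec<String> {
    // Same order the list endpoint would return (ORDER BY name ASC).
    incoming.sort();
    // Stands in for soft-deleting the old rows...
    current.clear();
    // ...and inserting the replacement set.
    current.extend(incoming.iter().cloned());
    incoming
}

fn main() {
    let mut rules = vec!["allow-ssh".to_string()];
    let returned =
        replace_rule_set(&mut rules, vec!["deny-all".into(), "allow-icmp".into()]);
    assert_eq!(returned, vec!["allow-icmp".to_string(), "deny-all".to_string()]);
}
```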
+
+    /// Replace all firewall rules with the given rules
+    pub async fn vpc_update_firewall_rules(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+        mut rules: Vec<VpcFirewallRule>,
+    ) -> UpdateResult<Vec<VpcFirewallRule>> {
+        opctx.authorize(authz::Action::Modify, authz_vpc).await?;
+        for r in &rules {
+            assert_eq!(r.vpc_id, authz_vpc.id());
+        }
+
+        // Sort the rules in the same order that we would return them when
+        // listing them. This is because we're going to use RETURNING to return
+        // the inserted rows from the database and we want them to come back in
+        // the same order that we would normally list them.
+        rules.sort_by_key(|r| r.name().to_string());
+
+        use db::schema::vpc_firewall_rule::dsl;
+
+        let now = Utc::now();
+        let delete_old_query = diesel::update(dsl::vpc_firewall_rule)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::vpc_id.eq(authz_vpc.id()))
+            .set(dsl::time_deleted.eq(now));
+
+        let insert_new_query = Vpc::insert_resource(
+            authz_vpc.id(),
+            diesel::insert_into(dsl::vpc_firewall_rule).values(rules),
+        );
+
+        #[derive(Debug)]
+        enum FirewallUpdateError {
+            CollectionNotFound,
+        }
+        type TxnError = TransactionError<FirewallUpdateError>;
+
+        // TODO-scalability: Ideally this would be a CTE so we don't need to
+        // hold a transaction open across multiple roundtrips from the database,
+        // but for now we're using a transaction due to the severely decreased
+        // legibility of CTEs via diesel right now.
+        self.pool_authorized(opctx)
+            .await?
+            .transaction(move |conn| {
+                delete_old_query.execute(conn)?;
+
+                // The generation count update on the vpc table row will take a
+                // write lock on the row, ensuring that the vpc was not deleted
+                // concurrently.
+                insert_new_query.insert_and_get_results(conn).map_err(|e| {
+                    match e {
+                        SyncInsertError::CollectionNotFound => {
+                            TxnError::CustomError(
+                                FirewallUpdateError::CollectionNotFound,
+                            )
+                        }
+                        SyncInsertError::DatabaseError(e) => e.into(),
+                    }
+                })
+            })
+            .await
+            .map_err(|e| match e {
+                TxnError::CustomError(
+                    FirewallUpdateError::CollectionNotFound,
+                ) => Error::not_found_by_id(ResourceType::Vpc, &authz_vpc.id()),
+                TxnError::Pool(e) => public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_vpc),
+                ),
+            })
+    }
+
+    pub async fn vpc_list_subnets(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<VpcSubnet> {
+        opctx.authorize(authz::Action::ListChildren, authz_vpc).await?;
+
+        use db::schema::vpc_subnet::dsl;
+        paginated(dsl::vpc_subnet, dsl::name, &pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::vpc_id.eq(authz_vpc.id()))
+            .select(VpcSubnet::as_select())
+            .load_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    /// Insert a VPC Subnet, checking for unique IP address ranges.
+    pub async fn vpc_create_subnet(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+        subnet: VpcSubnet,
+    ) -> Result<VpcSubnet, SubnetError> {
+        opctx
+            .authorize(authz::Action::CreateChild, authz_vpc)
+            .await
+            .map_err(SubnetError::External)?;
+        assert_eq!(authz_vpc.id(), subnet.vpc_id);
+
+        self.vpc_create_subnet_raw(subnet).await
+    }
+
+    pub(crate) async fn vpc_create_subnet_raw(
+        &self,
+        subnet: VpcSubnet,
+    ) -> Result<VpcSubnet, SubnetError> {
+        use db::schema::vpc_subnet::dsl;
+        let values = FilterConflictingVpcSubnetRangesQuery::new(subnet.clone());
+        diesel::insert_into(dsl::vpc_subnet)
+            .values(values)
+            .returning(VpcSubnet::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| SubnetError::from_pool(e, &subnet))
+    }
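`vpc_create_subnet_raw` delegates range uniqueness to `FilterConflictingVpcSubnetRangesQuery`. Approximated over plain integer ranges (the real query compares IPv4/IPv6 networks), the conflict test is simple interval overlap:

```rust
// Two inclusive ranges conflict iff they overlap.
fn overlaps(a: (u32, u32), b: (u32, u32)) -> bool {
    a.0 <= b.1 && b.0 <= a.1
}

// A candidate subnet range is available only if it overlaps no existing range.
fn subnet_is_available(candidate: (u32, u32), existing: &[(u32, u32)]) -> bool {
    existing.iter().all(|r| !overlaps(candidate, *r))
}

fn main() {
    let existing = [(0, 255), (512, 767)];
    assert!(subnet_is_available((256, 511), &existing));
    assert!(!subnet_is_available((200, 300), &existing));
}
```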
+
+    pub async fn vpc_delete_subnet(
+        &self,
+        opctx: &OpContext,
+        authz_subnet: &authz::VpcSubnet,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_subnet).await?;
+
+        use db::schema::vpc_subnet::dsl;
+        let now = Utc::now();
+        diesel::update(dsl::vpc_subnet)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_subnet.id()))
+            .set(dsl::time_deleted.eq(now))
+            .returning(VpcSubnet::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_subnet),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn vpc_update_subnet(
+        &self,
+        opctx: &OpContext,
+        authz_subnet: &authz::VpcSubnet,
+        updates: VpcSubnetUpdate,
+    ) -> UpdateResult<VpcSubnet> {
+        opctx.authorize(authz::Action::Modify, authz_subnet).await?;
+
+        use db::schema::vpc_subnet::dsl;
+        diesel::update(dsl::vpc_subnet)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_subnet.id()))
+            .set(updates)
+            .returning(VpcSubnet::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_subnet),
+                )
+            })
+    }
+
+    pub async fn subnet_list_network_interfaces(
+        &self,
+        opctx: &OpContext,
+        authz_subnet: &authz::VpcSubnet,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<NetworkInterface> {
+        opctx.authorize(authz::Action::ListChildren, authz_subnet).await?;
+
+        use db::schema::network_interface::dsl;
+        paginated(dsl::network_interface, dsl::name, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::subnet_id.eq(authz_subnet.id()))
+            .select(NetworkInterface::as_select())
+            .load_async::<NetworkInterface>(
+                self.pool_authorized(opctx).await?,
+            )
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn vpc_list_routers(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<VpcRouter> {
+        opctx.authorize(authz::Action::ListChildren, authz_vpc).await?;
+
+        use db::schema::vpc_router::dsl;
+        paginated(dsl::vpc_router, dsl::name, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::vpc_id.eq(authz_vpc.id()))
+            .select(VpcRouter::as_select())
+            .load_async::<VpcRouter>(
+                self.pool_authorized(opctx).await?,
+            )
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn vpc_create_router(
+        &self,
+        opctx: &OpContext,
+        authz_vpc: &authz::Vpc,
+        router: VpcRouter,
+    ) -> CreateResult<(authz::VpcRouter, VpcRouter)> {
+        opctx.authorize(authz::Action::CreateChild, authz_vpc).await?;
+
+        use db::schema::vpc_router::dsl;
+        let name = router.name().clone();
+        let router = diesel::insert_into(dsl::vpc_router)
+            .values(router)
+            .on_conflict(dsl::id)
+            .do_nothing()
+            .returning(VpcRouter::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::VpcRouter,
+                        name.as_str(),
+                    ),
+                )
+            })?;
+        Ok((
+            authz::VpcRouter::new(
+                authz_vpc.clone(),
+                router.id(),
+                LookupType::ById(router.id()),
+            ),
+            router,
+        ))
+    }
+
+    pub async fn vpc_delete_router(
+        &self,
+        opctx: &OpContext,
+        authz_router: &authz::VpcRouter,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_router).await?;
+
+        use db::schema::vpc_router::dsl;
+        let now = Utc::now();
+        diesel::update(dsl::vpc_router)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_router.id()))
+            .set(dsl::time_deleted.eq(now))
+            .returning(VpcRouter::as_returning())
+            .get_result_async(self.pool())
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_router),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn vpc_update_router(
+        &self,
+        opctx: &OpContext,
+        authz_router: &authz::VpcRouter,
+        updates: VpcRouterUpdate,
+    ) -> UpdateResult<VpcRouter> {
+        opctx.authorize(authz::Action::Modify, authz_router).await?;
+
+        use db::schema::vpc_router::dsl;
+        diesel::update(dsl::vpc_router)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_router.id()))
+            .set(updates)
+            .returning(VpcRouter::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_router),
+                )
+            })
+    }
+
+    pub async fn router_list_routes(
+        &self,
+        opctx: &OpContext,
+        authz_router: &authz::VpcRouter,
+        pagparams: &DataPageParams<'_, Name>,
+    ) -> ListResultVec<RouterRoute> {
+        opctx.authorize(authz::Action::ListChildren, authz_router).await?;
+
+        use db::schema::router_route::dsl;
+        paginated(dsl::router_route, dsl::name, pagparams)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::vpc_router_id.eq(authz_router.id()))
+            .select(RouterRoute::as_select())
+            .load_async::<RouterRoute>(
+                self.pool_authorized(opctx).await?,
+            )
+            .await
+            .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))
+    }
+
+    pub async fn router_create_route(
+        &self,
+        opctx: &OpContext,
+        authz_router: &authz::VpcRouter,
+        route: RouterRoute,
+    ) -> CreateResult<RouterRoute> {
+        assert_eq!(authz_router.id(), route.vpc_router_id);
+        opctx.authorize(authz::Action::CreateChild, authz_router).await?;
+
+        use db::schema::router_route::dsl;
+        let router_id = route.vpc_router_id;
+        let name = route.name().clone();
+
+        VpcRouter::insert_resource(
+            router_id,
+            diesel::insert_into(dsl::router_route).values(route),
+        )
+        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
+        .await
+        .map_err(|e| match e {
+            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
+                type_name: ResourceType::VpcRouter,
+                lookup_type: LookupType::ById(router_id),
+            },
+            AsyncInsertError::DatabaseError(e) => {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::RouterRoute,
+                        name.as_str(),
+                    ),
+                )
+            }
+        })
+    }
+
+    pub async fn router_delete_route(
+        &self,
+        opctx: &OpContext,
+        authz_route: &authz::RouterRoute,
+    ) -> DeleteResult {
+        opctx.authorize(authz::Action::Delete, authz_route).await?;
+
+        use db::schema::router_route::dsl;
+        let now = Utc::now();
+        diesel::update(dsl::router_route)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_route.id()))
+            .set(dsl::time_deleted.eq(now))
+            .execute_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_route),
+                )
+            })?;
+        Ok(())
+    }
+
+    pub async fn router_update_route(
+        &self,
+        opctx: &OpContext,
+        authz_route: &authz::RouterRoute,
+        route_update: RouterRouteUpdate,
+    ) -> UpdateResult<RouterRoute> {
+        opctx.authorize(authz::Action::Modify, authz_route).await?;
+
+        use db::schema::router_route::dsl;
+        diesel::update(dsl::router_route)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::id.eq(authz_route.id()))
+            .set(route_update)
+            .returning(RouterRoute::as_returning())
+            .get_result_async(self.pool_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_route),
+                )
+            })
+    }
+}
diff --git a/nexus/src/db/datastore/zpool.rs b/nexus/src/db/datastore/zpool.rs
new file mode 100644
index 00000000000..b2fb6cdf7a2
--- /dev/null
+++ b/nexus/src/db/datastore/zpool.rs
@@ -0,0 +1,60 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! [`DataStore`] methods on [`Zpool`]s.
+
+use super::DataStore;
+use crate::db;
+use crate::db::collection_insert::AsyncInsertError;
+use crate::db::collection_insert::DatastoreCollection;
+use crate::db::error::public_error_from_diesel_pool;
+use crate::db::error::ErrorHandler;
+use crate::db::identity::Asset;
+use crate::db::model::Sled;
+use crate::db::model::Zpool;
+use chrono::Utc;
+use diesel::prelude::*;
+use diesel::upsert::excluded;
+use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::Error;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
+
+impl DataStore {
+    /// Stores a new zpool in the database.
+    pub async fn zpool_upsert(&self, zpool: Zpool) -> CreateResult<Zpool> {
+        use db::schema::zpool::dsl;
+
+        let sled_id = zpool.sled_id;
+        Sled::insert_resource(
+            sled_id,
+            diesel::insert_into(dsl::zpool)
+                .values(zpool.clone())
+                .on_conflict(dsl::id)
+                .do_update()
+                .set((
+                    dsl::time_modified.eq(Utc::now()),
+                    dsl::sled_id.eq(excluded(dsl::sled_id)),
+                    dsl::total_size.eq(excluded(dsl::total_size)),
+                )),
+        )
+        .insert_and_get_result_async(self.pool())
+        .await
+        .map_err(|e| match e {
+            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
+                type_name: ResourceType::Sled,
+                lookup_type: LookupType::ById(sled_id),
+            },
+            AsyncInsertError::DatabaseError(e) => {
+                public_error_from_diesel_pool(
+                    e,
+                    ErrorHandler::Conflict(
+                        ResourceType::Zpool,
+                        &zpool.id().to_string(),
+                    ),
+                )
+            }
+        })
+    }
+}
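`zpool_upsert` (like `service_upsert` and `router_create_route` above) uses the collection-insert pattern: the child row is inserted only if its parent still exists and has not been deleted; otherwise the caller sees `CollectionNotFound`. A toy rendition of that two-part check:

```rust
use std::collections::HashMap;

struct Parent {
    time_deleted: Option<u64>,
    rcgen: u64, // child-resource generation
}

fn insert_child(
    parents: &mut HashMap<u32, Parent>,
    children: &mut Vec<(u32, u32)>, // (child_id, parent_id)
    child_id: u32,
    parent_id: u32,
) -> Result<(), &'static str> {
    match parents.get_mut(&parent_id) {
        Some(p) if p.time_deleted.is_none() => {
            // Bumping the generation stands in for the row lock that
            // serializes child inserts against parent deletion.
            p.rcgen += 1;
            children.push((child_id, parent_id));
            Ok(())
        }
        // Mirrors AsyncInsertError::CollectionNotFound.
        _ => Err("collection not found"),
    }
}

fn main() {
    let mut parents =
        HashMap::from([(1, Parent { time_deleted: None, rcgen: 0 })]);
    let mut children = Vec::new();
    assert!(insert_child(&mut parents, &mut children, 10, 1).is_ok());
    assert!(insert_child(&mut parents, &mut children, 11, 2).is_err());
}
```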