4 changes: 2 additions & 2 deletions nexus/src/authn/external/mod.rs
@@ -189,7 +189,7 @@ mod test {
let name1 = authn::SchemeName("grunt1");
let actor1 = authn::Actor {
id: "1c91bab2-4841-669f-cc32-de80da5bbf39".parse().unwrap(),
silo_id: *crate::db::fixed_data::silo::SILO_ID,
silo_id: *crate::db::fixed_data::silo_builtin::SILO_ID,
};
let grunt1 = Box::new(GruntScheme {
name: name1,
@@ -204,7 +204,7 @@ mod test {
let name2 = authn::SchemeName("grunt2");
let actor2 = authn::Actor {
id: "799684af-533a-cb66-b5ac-ab55a791d5ef".parse().unwrap(),
silo_id: *crate::db::fixed_data::silo::SILO_ID,
silo_id: *crate::db::fixed_data::silo_builtin::SILO_ID,
};
let grunt2 = Box::new(GruntScheme {
name: name2,
8 changes: 5 additions & 3 deletions nexus/src/authn/external/spoof.rs
@@ -57,7 +57,7 @@ lazy_static! {
/// Actor (id) used for the special "bad credentials" error
static ref SPOOF_RESERVED_BAD_CREDS_ACTOR: Actor = Actor {
id: "22222222-2222-2222-2222-222222222222".parse().unwrap(),
silo_id: *crate::db::fixed_data::silo::SILO_ID,
silo_id: *crate::db::fixed_data::silo_builtin::SILO_ID,
};
/// Complete HTTP header value to trigger the "bad actor" error
pub static ref SPOOF_HEADER_BAD_ACTOR: Authorization<Bearer> =
@@ -122,8 +122,10 @@ fn authn_spoof(raw_value: Option<&Authorization<Bearer>>) -> SchemeResult {

match Uuid::parse_str(str_value).context("parsing header value as UUID") {
Ok(id) => {
let actor =
Actor { id, silo_id: *crate::db::fixed_data::silo::SILO_ID };
let actor = Actor {
id,
silo_id: *crate::db::fixed_data::silo_builtin::SILO_ID,
};
SchemeResult::Authenticated(Details { actor })
}
Err(source) => SchemeResult::Failed(Reason::BadFormat { source }),
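
Aside: a minimal, illustrative sketch (not part of this diff) of the decision authn_spoof makes above — a bearer token that parses as a UUID authenticates that actor id, and anything else fails as a bad-format error. SketchResult below is a stand-in for the real SchemeResult/Reason types.

// Illustrative sketch only; SketchResult stands in for the real
// SchemeResult/Reason types used in spoof.rs.
use uuid::Uuid;

#[derive(Debug)]
enum SketchResult {
    Authenticated(Uuid),
    Failed(String),
}

fn parse_spoof_token(token: &str) -> SketchResult {
    match Uuid::parse_str(token) {
        // A well-formed UUID becomes the authenticated actor id.
        Ok(id) => SketchResult::Authenticated(id),
        // Anything else is reported as a formatting failure.
        Err(e) => SketchResult::Failed(format!("parsing header value as UUID: {}", e)),
    }
}

fn main() {
    println!("{:?}", parse_spoof_token("1c91bab2-4841-669f-cc32-de80da5bbf39"));
    println!("{:?}", parse_spoof_token("not-a-uuid"));
}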
2 changes: 1 addition & 1 deletion nexus/src/authn/mod.rs
@@ -164,7 +164,7 @@ impl Context {
pub fn test_context_for_actor(actor_id: Uuid) -> Context {
Context::context_for_actor(
actor_id,
*crate::db::fixed_data::silo::SILO_ID,
*crate::db::fixed_data::silo_builtin::SILO_ID,
)
}
}
132 changes: 74 additions & 58 deletions nexus/src/db/datastore.rs
@@ -31,29 +31,7 @@ use crate::authz::ApiResourceError;
use crate::context::OpContext;
use crate::db::fixed_data::role_assignment_builtin::BUILTIN_ROLE_ASSIGNMENTS;
use crate::db::fixed_data::role_builtin::BUILTIN_ROLES;
use crate::db::fixed_data::silo::{DEFAULT_SILO, SILO_ID};
use crate::db::lookup::LookupPath;
use crate::db::{
self,
error::{public_error_from_diesel_pool, ErrorHandler, TransactionError},
model::{
ConsoleSession, Dataset, DatasetKind, Disk, DiskRuntimeState,
Generation, IncompleteNetworkInterface, Instance, InstanceRuntimeState,
Name, NetworkInterface, Organization, OrganizationUpdate, OximeterInfo,
ProducerEndpoint, Project, ProjectUpdate, Region,
RoleAssignmentBuiltin, RoleBuiltin, RouterRoute, RouterRouteUpdate,
Silo, SiloUser, Sled, UpdateAvailableArtifact, UserBuiltin, Volume,
Vpc, VpcFirewallRule, VpcRouter, VpcRouterUpdate, VpcSubnet,
VpcSubnetUpdate, VpcUpdate, Zpool,
},
pagination::paginated,
pagination::paginated_multicolumn,
subnet_allocation::FilterConflictingVpcSubnetRangesQuery,
subnet_allocation::InsertNetworkInterfaceQuery,
subnet_allocation::NetworkInterfaceError,
subnet_allocation::SubnetError,
update_and_check::{UpdateAndCheck, UpdateStatus},
};
use crate::db::fixed_data::silo_builtin::SILO_ID;
use crate::external_api::params;
use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, ConnectionManager};
use chrono::Utc;
@@ -81,6 +59,29 @@ use std::net::Ipv6Addr;
use std::sync::Arc;
use uuid::Uuid;

use crate::db::lookup::LookupPath;
use crate::db::{
self,
error::{public_error_from_diesel_pool, ErrorHandler, TransactionError},
model::{
ConsoleSession, Dataset, DatasetKind, Disk, DiskRuntimeState,
Generation, IncompleteNetworkInterface, Instance, InstanceRuntimeState,
Name, NetworkInterface, Organization, OrganizationUpdate, OximeterInfo,
ProducerEndpoint, Project, ProjectUpdate, Region,
RoleAssignmentBuiltin, RoleBuiltin, RouterRoute, RouterRouteUpdate,
Silo, SiloUser, Sled, UpdateAvailableArtifact, UserBuiltin, Volume,
Vpc, VpcFirewallRule, VpcRouter, VpcRouterUpdate, VpcSubnet,
VpcSubnetUpdate, VpcUpdate, Zpool,
},
pagination::paginated,
pagination::paginated_multicolumn,
subnet_allocation::FilterConflictingVpcSubnetRangesQuery,
subnet_allocation::InsertNetworkInterfaceQuery,
subnet_allocation::NetworkInterfaceError,
subnet_allocation::SubnetError,
update_and_check::{UpdateAndCheck, UpdateStatus},
};

// Number of unique datasets required to back a region.
// TODO: This should likely turn into a configuration option.
const REGION_REDUNDANCY_THRESHOLD: usize = 3;
@@ -2487,21 +2488,22 @@ impl DataStore {
&self,
opctx: &OpContext,
) -> Result<(), Error> {
opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?;

debug!(opctx.log, "attempting to create built-in silo");

use db::schema::silo::dsl;
let count = diesel::insert_into(dsl::silo)
.values(&*DEFAULT_SILO)
.on_conflict(dsl::id)
.do_nothing()
.execute_async(self.pool_authorized(opctx).await?)
.await
.map_err(|e| {
public_error_from_diesel_pool(e, ErrorHandler::Server)
})?;
info!(opctx.log, "created {} built-in silos", count);
let builtin_silo = Silo::new_with_id(
*SILO_ID,
params::SiloCreate {
identity: IdentityMetadataCreateParams {
name: "fakesilo".parse().unwrap(),
description: "fake silo".to_string(),
},
discoverable: false,
},
);

let _create_result = self.silo_create(opctx, builtin_silo).await?;
info!(opctx.log, "created built-in silo");

Ok(())
}

@@ -2510,11 +2512,12 @@ impl DataStore {
opctx: &OpContext,
silo: Silo,
) -> CreateResult<Silo> {
opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?;
use db::schema::silo::dsl;

// TODO opctx.authorize

let silo_id = silo.id();

use db::schema::silo::dsl;
diesel::insert_into(dsl::silo)
.values(silo)
.returning(Silo::as_returning())
@@ -2536,9 +2539,8 @@ impl DataStore {
opctx: &OpContext,
pagparams: &DataPageParams<'_, Uuid>,
) -> ListResultVec<Silo> {
opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;

use db::schema::silo::dsl;
// TODO opctx.authorize
paginated(dsl::silo, dsl::id, pagparams)
.filter(dsl::time_deleted.is_null())
.filter(dsl::discoverable.eq(true))
@@ -2553,9 +2555,8 @@ impl DataStore {
opctx: &OpContext,
pagparams: &DataPageParams<'_, Name>,
) -> ListResultVec<Silo> {
opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;

use db::schema::silo::dsl;
// TODO opctx.authorize
paginated(dsl::silo, dsl::name, pagparams)
.filter(dsl::time_deleted.is_null())
.filter(dsl::discoverable.eq(true))
@@ -2568,26 +2569,38 @@ impl DataStore {
pub async fn silo_delete(
&self,
opctx: &OpContext,
authz_silo: &authz::Silo,
db_silo: &db::model::Silo,
name: &Name,
) -> DeleteResult {
assert_eq!(authz_silo.id(), db_silo.id());
opctx.authorize(authz::Action::Delete, authz_silo).await?;

use db::schema::organization;
use db::schema::silo;
use db::schema::silo_user;

// TODO opctx.authorize

let (id, rcgen) = silo::dsl::silo
.filter(silo::dsl::time_deleted.is_null())
.filter(silo::dsl::name.eq(name.clone()))
.select((silo::dsl::id, silo::dsl::rcgen))
.get_result_async::<(Uuid, Generation)>(self.pool())
.await
.map_err(|e| {
public_error_from_diesel_pool(
e,
ErrorHandler::NotFoundByLookup(
ResourceType::Silo,
LookupType::ByName(name.to_string()),
),
)
})?;

// Make sure there are no organizations present within this silo.
let id = authz_silo.id();
let rcgen = db_silo.rcgen;
let org_found = diesel_pool_result_optional(
organization::dsl::organization
.filter(organization::dsl::silo_id.eq(id))
.filter(organization::dsl::time_deleted.is_null())
.select(organization::dsl::id)
.limit(1)
.first_async::<Uuid>(self.pool_authorized(opctx).await?)
.first_async::<Uuid>(self.pool())
.await,
)
.map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?;
@@ -2605,12 +2618,15 @@ impl DataStore {
.filter(silo::dsl::id.eq(id))
.filter(silo::dsl::rcgen.eq(rcgen))
.set(silo::dsl::time_deleted.eq(now))
.execute_async(self.pool_authorized(opctx).await?)
.execute_async(self.pool())
.await
.map_err(|e| {
public_error_from_diesel_pool(
e,
ErrorHandler::NotFoundByResource(authz_silo),
ErrorHandler::NotFoundByLookup(
ResourceType::Silo,
LookupType::ById(id),
),
)
})?;

@@ -2624,22 +2640,22 @@ impl DataStore {
info!(opctx.log, "deleted silo {}", id);

// If silo deletion succeeded, delete all silo users
// TODO-correctness This needs to happen in a saga or some other
// mechanism that ensures it happens even if we crash at this point.
// TODO-scalability This needs to happen in batches
let updated_rows = diesel::update(silo_user::dsl::silo_user)
.filter(silo_user::dsl::silo_id.eq(id))
.set(silo_user::dsl::time_deleted.eq(now))
.execute_async(self.pool_authorized(opctx).await?)
.execute_async(self.pool())
.await
.map_err(|e| {
public_error_from_diesel_pool(
e,
ErrorHandler::NotFoundByResource(authz_silo),
ErrorHandler::NotFoundByLookup(
ResourceType::Silo,
LookupType::ById(id),
),
)
})?;

info!(opctx.log, "deleted {} silo users for silo {}", updated_rows, id);
info!(opctx.log, "deleted {} silo users for silo {}", updated_rows, id,);

Ok(())
}
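
Aside: the soft-delete above is guarded by the silo's rcgen (child-resource generation). Below is a minimal, illustrative sketch of that pattern — assuming, as the guard implies, that creating a child organization bumps the parent silo's rcgen — so a silo whose generation advanced after the organization check is left untouched. The type and field names are stand-ins, not the real schema or diesel query.

// Illustrative sketch only; SiloRow is a stand-in for the real silo table.
struct SiloRow {
    rcgen: u64,
    time_deleted: Option<String>,
}

// Mirrors the shape of:
//   UPDATE silo SET time_deleted = now
//   WHERE id = ... AND rcgen = observed_rcgen AND time_deleted IS NULL
fn conditional_soft_delete(row: &mut SiloRow, observed_rcgen: u64, now: &str) -> usize {
    if row.time_deleted.is_none() && row.rcgen == observed_rcgen {
        row.time_deleted = Some(now.to_string());
        1 // one row updated
    } else {
        0 // generation moved (or already deleted): no rows updated
    }
}

fn main() {
    let mut silo = SiloRow { rcgen: 4, time_deleted: None };
    // Generation advanced after the organization check: delete is a no-op.
    assert_eq!(conditional_soft_delete(&mut silo, 3, "2022-04-01T00:00:00Z"), 0);
    // Generation matches what was observed: soft-delete succeeds.
    assert_eq!(conditional_soft_delete(&mut silo, 4, "2022-04-01T00:00:00Z"), 1);
    assert!(silo.time_deleted.is_some());
}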
2 changes: 1 addition & 1 deletion nexus/src/db/fixed_data/mod.rs
@@ -32,7 +32,7 @@ use lazy_static::lazy_static;

pub mod role_assignment_builtin;
pub mod role_builtin;
pub mod silo;
pub mod silo_builtin;
pub mod user_builtin;

lazy_static! {
24 changes: 0 additions & 24 deletions nexus/src/db/fixed_data/silo.rs

This file was deleted.

11 changes: 11 additions & 0 deletions nexus/src/db/fixed_data/silo_builtin.rs
@@ -0,0 +1,11 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

use lazy_static::lazy_static;

lazy_static! {
pub static ref SILO_ID: uuid::Uuid = "001de000-5110-4000-8000-000000000000"
.parse()
.expect("invalid uuid for builtin silo id");
}
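
Aside: a minimal, illustrative sketch (not part of this diff) of how a lazy_static UUID like SILO_ID above is consumed — the static is a lazily initialized wrapper, so callers dereference it to copy out the Uuid, which is why the hunks earlier in this diff write *crate::db::fixed_data::silo_builtin::SILO_ID. EXAMPLE_SILO_ID below is a stand-in name.

// Illustrative sketch only; EXAMPLE_SILO_ID is a stand-in for SILO_ID above.
use lazy_static::lazy_static;
use uuid::Uuid;

lazy_static! {
    static ref EXAMPLE_SILO_ID: Uuid = "001de000-5110-4000-8000-000000000000"
        .parse()
        .expect("invalid uuid for example silo id");
}

fn main() {
    // The string is parsed once, on first access; `*` copies the Uuid out of
    // the lazy_static wrapper.
    let id: Uuid = *EXAMPLE_SILO_ID;
    println!("built-in silo id: {}", id);
}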
2 changes: 1 addition & 1 deletion nexus/src/db/fixed_data/user_builtin.rs
@@ -3,7 +3,7 @@
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Built-in users

use crate::db::fixed_data::silo::SILO_ID;
use crate::db::fixed_data::silo_builtin::SILO_ID;
use lazy_static::lazy_static;
use omicron_common::api;
use uuid::Uuid;
2 changes: 2 additions & 0 deletions nexus/src/external_api/http_entrypoints.rs
@@ -230,6 +230,8 @@ pub fn external_api() -> NexusApiDescription {
// clients. Client generators use operationId to name API methods, so changing
// a function name is a breaking change from a client perspective.

// TODO authz for silo endpoints

// List all silos (that are discoverable).
#[endpoint {
method = GET,
7 changes: 1 addition & 6 deletions nexus/src/nexus.rs
@@ -738,12 +738,7 @@ impl Nexus {
opctx: &OpContext,
name: &Name,
) -> DeleteResult {
let (.., authz_silo, db_silo) =
LookupPath::new(opctx, &self.db_datastore)
.silo_name(name)
.fetch_for(authz::Action::Delete)
.await?;
self.db_datastore.silo_delete(opctx, &authz_silo, &db_silo).await
self.db_datastore.silo_delete(opctx, name).await
}

// Organizations