From 412a6aff821edae27ff14a56414ba6f3221df1c6 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Thu, 1 Sep 2022 14:36:37 +0000 Subject: [PATCH 01/16] wip --- bootstore/src/db/mod.rs | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index a0a326021c2..effc3f38521 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -35,6 +35,9 @@ pub enum Error { #[allow(dead_code)] #[error("Share commit for {epoch} does not match prepare")] CommitHashMismatch { epoch: i32 }, + + #[error("No shares have been committed")] + NoSharesCommitted, } pub struct Db { @@ -128,6 +131,42 @@ impl Db { Ok(()) }) } + + pub fn get_latest_committed_share( + &mut self, + ) -> Result<(i32, Share), Error> { + self.conn.immediate_transaction(|tx| { + let epoch = Self::get_latest_committed_share_epoch(tx)?; + if epoch.is_none() { + return Err(Error::NoSharesCommitted); + } + let epoch = epoch.unwrap(); + let share = Self::get_prepared_share(tx, epoch)?; + Ok((epoch, share)) + }) + } + + pub fn get_latest_committed_share_epoch( + tx: &mut SqliteConnection, + ) -> Result, Error> { + use schema::key_share_commits::dsl; + let epoch = dsl::key_share_commits + .select(diesel::dsl::max(dsl::epoch)) + .get_result(tx)?; + Ok(epoch) + } + + pub fn get_prepared_share( + tx: &mut SqliteConnection, + epoch: i32, + ) -> Result { + use schema::key_share_prepares::dsl; + let share = dsl::key_share_prepares + .select(dsl::share) + .filter(dsl::epoch.eq(epoch)) + .get_result(tx)?; + Ok(share) + } } #[cfg(test)] From 6c474d5d99b1fefa564b9c06ebcb1dd3fd5223ee Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Fri, 2 Sep 2022 16:50:33 +0000 Subject: [PATCH 02/16] start implementing node methods --- bootstore/src/db/mod.rs | 39 +++++++++++++-------------------------- bootstore/src/messages.rs | 10 ++++++++++ bootstore/src/node.rs | 16 ++++++++++++++-- 3 files changed, 37 insertions(+), 28 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index effc3f38521..9e7b3adf84d 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -135,36 +135,23 @@ impl Db { pub fn get_latest_committed_share( &mut self, ) -> Result<(i32, Share), Error> { - self.conn.immediate_transaction(|tx| { - let epoch = Self::get_latest_committed_share_epoch(tx)?; - if epoch.is_none() { - return Err(Error::NoSharesCommitted); - } - let epoch = epoch.unwrap(); - let share = Self::get_prepared_share(tx, epoch)?; - Ok((epoch, share)) - }) - } - - pub fn get_latest_committed_share_epoch( - tx: &mut SqliteConnection, - ) -> Result, Error> { - use schema::key_share_commits::dsl; - let epoch = dsl::key_share_commits - .select(diesel::dsl::max(dsl::epoch)) - .get_result(tx)?; - Ok(epoch) + use schema::key_shares::dsl; + let res = dsl::key_shares + .select((dsl::epoch, dsl::share)) + .order(dsl::epoch.desc()) + .filter(dsl::committed.eq(true)) + .get_result::<(i32, Share)>(&mut self.conn)?; + Ok(res) } - pub fn get_prepared_share( - tx: &mut SqliteConnection, - epoch: i32, - ) -> Result { - use schema::key_share_prepares::dsl; - let share = dsl::key_share_prepares + pub fn get_committed_share(&mut self, epoch: i32) -> Result { + use schema::key_shares::dsl; + let share = dsl::key_shares .select(dsl::share) .filter(dsl::epoch.eq(epoch)) - .get_result(tx)?; + .filter(dsl::committed.eq(true)) + .get_result::(&mut self.conn)?; + Ok(share) } } diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index 95a9023cd67..193474cb9bd 
100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize}; use uuid::Uuid; use vsss_rs::Share; +use crate::db; use crate::trust_quorum::SerializableShareDistribution; /// A request sent to a [`Node`] from another [`Node`] or a [`Coordinator`]. @@ -110,4 +111,13 @@ pub enum NodeError { {rack_uuid}, epoch: {epoch}" )] MissingKeySharePrepare { rack_uuid: Uuid, epoch: i32 }, + + #[error("DB error: {0}")] + Db(String), +} + +impl From for NodeError { + fn from(err: db::Error) -> Self { + NodeError::Db(err.to_string()) + } } diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 183cb5c46bb..0383f26b15e 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -11,6 +11,7 @@ use slog::Logger; use sprockets_host::Ed25519Certificate; use uuid::Uuid; +use crate::db; use crate::db::Db; use crate::messages::*; use crate::trust_quorum::SerializableShareDistribution; @@ -84,9 +85,20 @@ impl Node { fn handle_get_share( &mut self, - _epoch: i32, + epoch: i32, ) -> Result { - unimplemented!(); + match self.db.get_committed_share(epoch) { + Ok(share) => { + Ok(NodeOpResult::Share { epoch, share: share.0.share }) + } + Err(err) => { + if let db::Error::Db(diesel::result::Error::NotFound) = err { + Err(NodeError::KeyShareDoesNotExist { epoch }) + } else { + Err(err.into()) + } + } + } } fn handle_initialize( From 827ee53f1ac7063b9cc5fdb75053e5d720332b75 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 7 Sep 2022 17:58:38 +0000 Subject: [PATCH 03/16] Add rack table and allow multiple connections --- bootstore/src/db/mod.rs | 134 ++++++++++++++++++++++++++++-------- bootstore/src/db/models.rs | 24 ++++--- bootstore/src/db/schema.rs | 6 ++ bootstore/src/db/schema.sql | 6 ++ bootstore/src/node.rs | 15 ++-- 5 files changed, 143 insertions(+), 42 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 9e7b3adf84d..d0e8af24bbb 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -16,6 +16,7 @@ use slog::{info, o}; use crate::trust_quorum::SerializableShareDistribution; use models::KeyShare; +use models::Rack; use models::Sha3_256Digest; use models::Share; use sha3::{Digest, Sha3_256}; @@ -38,21 +39,32 @@ pub enum Error { #[error("No shares have been committed")] NoSharesCommitted, + + #[error("DB invariant violated: {0}")] + DbInvariant(String), + + #[error("Already initialized with rack uuid: {0}")] + AlreadyInitialized(String), } pub struct Db { - // Temporary until the using code is written - #[allow(dead_code)] log: Logger, - // Temporary until the using code is written - #[allow(dead_code)] - conn: SqliteConnection, + + // We use a String, because sqlite requires paths to be UTF-8, + // and strings guarantee that constraint. 
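    //
    // Keeping the path (rather than a single `SqliteConnection`) is what lets
    // `new_connection()` below hand out an independent connection per caller.
    // A minimal usage sketch, with a hypothetical log and db path:
    //
    //     let (db, mut conn) = Db::init(&log, "/tmp/bootstore.db")?;
    //     let mut other_conn = db.new_connection()?; // e.g. for another task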
+ path: String, } // Temporary until the using code is written #[allow(dead_code)] impl Db { - pub fn open(log: &Logger, path: &str) -> Result { + /// Initialize the database + /// + /// Create tables if they don't exist and set pragmas + pub fn init( + log: &Logger, + path: &str, + ) -> Result<(Db, SqliteConnection), Error> { let schema = include_str!("./schema.sql"); let log = log.new(o!( "component" => "BootstoreDb" @@ -79,11 +91,21 @@ impl Db { // Create tables c.batch_execute(&schema)?; - Ok(Db { log, conn: c }) + Ok((Db { log, path: path.to_string() }, c)) + } + + pub fn new_connection(&self) -> Result { + info!( + self.log, + "Creating a new connection to database {:?}", self.path + ); + SqliteConnection::establish(&self.path) + .map_err(|err| Error::DbOpen { path: self.path.clone(), err }) } pub fn prepare_share( - &mut self, + &self, + tx: &mut SqliteConnection, epoch: i32, share: SerializableShareDistribution, ) -> Result<(), Error> { @@ -101,19 +123,18 @@ impl Db { share_digest, committed: false, }; - diesel::insert_into(dsl::key_shares) - .values(&prepare) - .execute(&mut self.conn)?; + diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; Ok(()) } pub fn commit_share( - &mut self, + &self, + conn: &mut SqliteConnection, epoch: i32, digest: sprockets_common::Sha3_256Digest, ) -> Result<(), Error> { use schema::key_shares::dsl; - self.conn.immediate_transaction(|tx| { + conn.immediate_transaction(|tx| { // We only want to commit if the share digest of the commit is the // same as that of the prepare. let prepare_digest = dsl::key_shares @@ -132,28 +153,83 @@ impl Db { }) } + pub fn initialize(&self, conn: &mut SqliteConnection) -> Result<(), Error> { + conn.immediate_transaction(|tx| { + if self.is_initialized(tx)? { + let uuid = self.get_rack_uuid(tx)?; + return Err(Error::AlreadyInitialized(uuid)); + } + + Ok(()) + }) + } + pub fn get_latest_committed_share( - &mut self, + &self, + conn: &mut SqliteConnection, ) -> Result<(i32, Share), Error> { use schema::key_shares::dsl; let res = dsl::key_shares .select((dsl::epoch, dsl::share)) .order(dsl::epoch.desc()) .filter(dsl::committed.eq(true)) - .get_result::<(i32, Share)>(&mut self.conn)?; + .get_result::<(i32, Share)>(conn)?; Ok(res) } - pub fn get_committed_share(&mut self, epoch: i32) -> Result { + pub fn get_committed_share( + &self, + conn: &mut SqliteConnection, + epoch: i32, + ) -> Result { use schema::key_shares::dsl; let share = dsl::key_shares .select(dsl::share) .filter(dsl::epoch.eq(epoch)) .filter(dsl::committed.eq(true)) - .get_result::(&mut self.conn)?; + .get_result::(conn)?; Ok(share) } + + /// Return true if there is a commit for epoch 0, false otherwise + pub fn is_initialized( + &self, + tx: &mut SqliteConnection, + ) -> Result { + use schema::key_shares::dsl; + Ok(dsl::key_shares + .select(dsl::epoch) + .filter(dsl::epoch.eq(0)) + .filter(dsl::committed.eq(true)) + .get_result::(tx) + .optional()? + .is_some()) + } + + /// Return true if the persisted rack uuid matches `uuid`, false otherwise + pub fn has_valid_rack_uuid( + &self, + uuid: &str, + tx: &mut SqliteConnection, + ) -> Result { + use schema::rack::dsl; + + Ok(dsl::rack + .filter(dsl::uuid.eq(uuid)) + .get_result::(tx) + .optional()? + .is_some()) + } + + pub fn get_rack_uuid( + &self, + tx: &mut SqliteConnection, + ) -> Result { + use schema::rack::dsl; + + Ok(dsl::rack.select(dsl::uuid).get_result::(tx)?) 
+ } } #[cfg(test)] @@ -186,15 +262,15 @@ mod tests { fn simple_prepare_insert_and_query() { use schema::key_shares::dsl; let logctx = test_setup_log("test_db"); - let mut db = Db::open(&logctx.log, ":memory:").unwrap(); + let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.prepare_share(epoch, expected.clone()).unwrap(); + db.prepare_share(&mut conn, epoch, expected.clone()).unwrap(); let (share, committed) = dsl::key_shares .select((dsl::share, dsl::committed)) .filter(dsl::epoch.eq(epoch)) - .get_result::<(Share, bool)>(&mut db.conn) + .get_result::<(Share, bool)>(&mut conn) .unwrap(); assert_eq!(share.0, expected); assert_eq!(committed, false); @@ -204,10 +280,10 @@ mod tests { #[test] fn commit_fails_without_corresponding_prepare() { let logctx = test_setup_log("test_db"); - let mut db = Db::open(&logctx.log, ":memory:").unwrap(); + let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); let epoch = 0; let digest = sprockets_common::Sha3_256Digest::default(); - let err = db.commit_share(epoch, digest).unwrap_err(); + let err = db.commit_share(&mut conn, epoch, digest).unwrap_err(); assert!(matches!(err, Error::Db(diesel::result::Error::NotFound))); logctx.cleanup_successful(); } @@ -215,13 +291,13 @@ mod tests { #[test] fn commit_fails_with_invalid_hash() { let logctx = test_setup_log("test_db"); - let mut db = Db::open(&logctx.log, ":memory:").unwrap(); + let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.prepare_share(epoch, expected.clone()).unwrap(); + db.prepare_share(&mut conn, epoch, expected.clone()).unwrap(); let digest = sprockets_common::Sha3_256Digest::default(); - let err = db.commit_share(epoch, digest).unwrap_err(); + let err = db.commit_share(&mut conn, epoch, digest).unwrap_err(); assert!(matches!(err, Error::CommitHashMismatch { epoch: _ })); logctx.cleanup_successful(); } @@ -229,24 +305,24 @@ mod tests { #[test] fn commit_succeeds_with_correct_hash() { let logctx = test_setup_log("test_db"); - let mut db = Db::open(&logctx.log, ":memory:").unwrap(); + let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.prepare_share(epoch, expected.clone()).unwrap(); + db.prepare_share(&mut conn, epoch, expected.clone()).unwrap(); let val = bcs::to_bytes(&expected).unwrap(); let digest = sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) .into(); - assert!(db.commit_share(epoch, digest).is_ok()); + assert!(db.commit_share(&mut conn, epoch, digest).is_ok()); // Ensure `committed = true` use schema::key_shares::dsl; let committed = dsl::key_shares .select(dsl::committed) .filter(dsl::epoch.eq(epoch)) - .get_result::(&mut db.conn) + .get_result::(&mut conn) .unwrap(); assert_eq!(true, committed); logctx.cleanup_successful(); diff --git a/bootstore/src/db/models.rs b/bootstore/src/db/models.rs index f567b35a8f5..3ef2ebd09c6 100644 --- a/bootstore/src/db/models.rs +++ b/bootstore/src/db/models.rs @@ -8,6 +8,7 @@ use diesel::deserialize::FromSql; use diesel::prelude::*; use diesel::serialize::ToSql; use diesel::FromSqlRow; +use uuid::Uuid; use super::macros::array_new_type; use super::macros::bcs_new_type; @@ -27,14 +28,21 @@ pub struct KeyShare { pub committed: bool, } -// A 
chacha20poly1305 secret encrypted by a chacha20poly1305 secret key -// derived from the rack secret for the given epoch with the given salt -// -// The epoch informs which rack secret should be used to derive the -// encryptiong key used to encrypt this root secret. -// -// TODO-security: We probably don't want to log even the encrypted secret, but -// it's likely useful for debugging right now. +/// Information about the rack +#[derive(Debug, Queryable, Insertable)] +#[diesel(table_name = rack)] +pub struct Rack { + pub uuid: String, +} + +/// A chacha20poly1305 secret encrypted by a chacha20poly1305 secret key +/// derived from the rack secret for the given epoch with the given salt +/// +/// The epoch informs which rack secret should be used to derive the +/// encryptiong key used to encrypt this root secret. +/// +/// TODO-security: We probably don't want to log even the encrypted secret, but +/// it's likely useful for debugging right now. #[derive(Debug, Queryable, Insertable)] pub struct EncryptedRootSecret { /// The epoch of the rack secret rotation or rack reconfiguration diff --git a/bootstore/src/db/schema.rs b/bootstore/src/db/schema.rs index dbf72dcbc6c..172e79d6ed1 100644 --- a/bootstore/src/db/schema.rs +++ b/bootstore/src/db/schema.rs @@ -13,6 +13,12 @@ table! { } } +table! { + rack(uuid) { + uuid -> Text, + } +} + table! { encrypted_root_secrets(epoch) { epoch -> Integer, diff --git a/bootstore/src/db/schema.sql b/bootstore/src/db/schema.sql index 4b0a0257e6b..e755919aece 100644 --- a/bootstore/src/db/schema.sql +++ b/bootstore/src/db/schema.sql @@ -7,6 +7,12 @@ CREATE TABLE IF NOT EXISTS key_shares ( PRIMARY KEY (epoch) ); +CREATE TABLE IF NOT EXISTS rack ( + uuid TEXT NOT NULL, + + PRIMARY KEY (uuid) +); + CREATE TABLE IF NOT EXISTS encrypted_root_secrets ( epoch INTEGER NOT NULL, salt BLOB NOT NULL, diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 0383f26b15e..8d1e53c1b5f 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -7,6 +7,7 @@ //! Most logic is contained here, but networking sits on top. //! This allows easier testing of clusters and failure situations. +use diesel::SqliteConnection; use slog::Logger; use sprockets_host::Ed25519Certificate; use uuid::Uuid; @@ -41,13 +42,17 @@ pub struct Config { pub struct Node { config: Config, db: Db, + + // For now we just use a single connection + // This *may* change once there is an async connection handling mechanism on top. + conn: SqliteConnection, } impl Node { /// Create a new Node pub fn new(config: Config) -> Node { - let db = Db::open(&config.log, &config.db_path).unwrap(); - Node { config, db } + let (db, conn) = Db::init(&config.log, &config.db_path).unwrap(); + Node { config, db, conn } } /// Handle a message received over sprockets from another [`Node`] or @@ -87,7 +92,7 @@ impl Node { &mut self, epoch: i32, ) -> Result { - match self.db.get_committed_share(epoch) { + match self.db.get_committed_share(&mut self.conn, epoch) { Ok(share) => { Ok(NodeOpResult::Share { epoch, share: share.0.share }) } @@ -103,8 +108,8 @@ impl Node { fn handle_initialize( &mut self, - _rack_uuid: Uuid, - _share_distribution: SerializableShareDistribution, + rack_uuid: Uuid, + share_distribution: SerializableShareDistribution, ) -> Result { unimplemented!(); } From a2240e8a83af393623681f9e28b259465dea1fbf Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Wed, 7 Sep 2022 21:25:49 +0000 Subject: [PATCH 04/16] Add support for rack init --- bootstore/src/db/error.rs | 33 ++++++++++++ bootstore/src/db/mod.rs | 108 ++++++++++++++++++++++--------------- bootstore/src/db/models.rs | 26 ++++++++- bootstore/src/messages.rs | 3 +- bootstore/src/node.rs | 3 +- 5 files changed, 125 insertions(+), 48 deletions(-) create mode 100644 bootstore/src/db/error.rs diff --git a/bootstore/src/db/error.rs b/bootstore/src/db/error.rs new file mode 100644 index 00000000000..7d365e27af4 --- /dev/null +++ b/bootstore/src/db/error.rs @@ -0,0 +1,33 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! DB related errors + +use diesel::result::ConnectionError; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to open db connection to {path}: {err}")] + DbOpen { path: String, err: ConnectionError }, + + #[error(transparent)] + Db(#[from] diesel::result::Error), + + #[error(transparent)] + Bcs(#[from] bcs::Error), + + // Temporary until the using code is written + #[allow(dead_code)] + #[error("Share commit for {epoch} does not match prepare")] + CommitHashMismatch { epoch: i32 }, + + #[error("No shares have been committed")] + NoSharesCommitted, + + #[error("DB invariant violated: {0}")] + DbInvariant(String), + + #[error("Already initialized with rack uuid: {0}")] + AlreadyInitialized(String), +} diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index d0e8af24bbb..54bf2e5921d 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -4,6 +4,7 @@ //! Database layer for the bootstore +mod error; mod macros; mod models; mod schema; @@ -13,39 +14,14 @@ use diesel::prelude::*; use diesel::SqliteConnection; use slog::Logger; use slog::{info, o}; +use uuid::Uuid; use crate::trust_quorum::SerializableShareDistribution; +pub(crate) use error::Error; use models::KeyShare; use models::Rack; use models::Sha3_256Digest; use models::Share; -use sha3::{Digest, Sha3_256}; - -#[derive(thiserror::Error, Debug)] -pub enum Error { - #[error("Failed to open db connection to {path}: {err}")] - DbOpen { path: String, err: ConnectionError }, - - #[error(transparent)] - Db(#[from] diesel::result::Error), - - #[error(transparent)] - Bcs(#[from] bcs::Error), - - // Temporary until the using code is written - #[allow(dead_code)] - #[error("Share commit for {epoch} does not match prepare")] - CommitHashMismatch { epoch: i32 }, - - #[error("No shares have been committed")] - NoSharesCommitted, - - #[error("DB invariant violated: {0}")] - DbInvariant(String), - - #[error("Already initialized with rack uuid: {0}")] - AlreadyInitialized(String), -} pub struct Db { log: Logger, @@ -111,18 +87,7 @@ impl Db { ) -> Result<(), Error> { info!(self.log, "Writing key share prepare for {epoch} to the Db"); use schema::key_shares::dsl; - // We save the digest so we don't have to deserialize and recompute most of the time. - // We'd only want to do that for a consistency check occasionally. 
- let val = bcs::to_bytes(&share)?; - let share_digest = - sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) - .into(); - let prepare = KeyShare { - epoch, - share: Share(share), - share_digest, - committed: false, - }; + let prepare = KeyShare::new(epoch, share)?; diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; Ok(()) } @@ -153,17 +118,71 @@ impl Db { }) } - pub fn initialize(&self, conn: &mut SqliteConnection) -> Result<(), Error> { + pub fn initialize( + &self, + conn: &mut SqliteConnection, + rack_uuid: &Uuid, + share_distribution: SerializableShareDistribution, + ) -> Result<(), Error> { conn.immediate_transaction(|tx| { if self.is_initialized(tx)? { - let uuid = self.get_rack_uuid(tx)?; + // If the rack is initialized, a rack uuid must exist + let uuid = self.get_rack_uuid(tx)?.unwrap(); return Err(Error::AlreadyInitialized(uuid)); } + let new_uuid = rack_uuid.to_string(); + match self.initialize_rack_uuid(tx, &new_uuid)? { + Some(old_uuid) => { + info!(self.log, "Re-Initializing Rack: Old UUID: {old_uuid}, New UUID: {new_uuid}"); + self.update_prepare_for_epoch_0(tx, share_distribution) + + }, + None => { + info!(self.log, "Initializing Rack for the first time with UUID: {new_uuid}"); + self.prepare_share(tx, 0, share_distribution) + } + } - Ok(()) }) } + // During rack initialization we set the rack UUID. + // + // We only allow rewriting this UUID in when the rack is not initialized, + // and so we only call this from the `initialize` method. + // + // Since we only allow a single row, we just delete the existing rows + // and insert the new one. + // + // Return the old rack UUID if one exists + fn initialize_rack_uuid( + &self, + tx: &mut SqliteConnection, + new_uuid: &str, + ) -> Result, Error> { + use schema::rack::dsl; + let old_uuid = self.get_rack_uuid(tx)?; + diesel::delete(dsl::rack).execute(tx)?; + diesel::insert_into(dsl::rack) + .values(dsl::uuid.eq(new_uuid)) + .execute(tx)?; + Ok(old_uuid) + } + + // Overwrite a share for epoch 0 during rack initialization + // + // This method should only be called from dinitialize. + fn update_prepare_for_epoch_0( + &self, + tx: &mut SqliteConnection, + share_distribution: SerializableShareDistribution, + ) -> Result<(), Error> { + use schema::key_shares::dsl; + let prepare = KeyShare::new(0, share_distribution)?; + diesel::update(dsl::key_shares).set(prepare).execute(tx)?; + Ok(()) + } + pub fn get_latest_committed_share( &self, conn: &mut SqliteConnection, @@ -225,10 +244,10 @@ impl Db { pub fn get_rack_uuid( &self, tx: &mut SqliteConnection, - ) -> Result { + ) -> Result, Error> { use schema::rack::dsl; - Ok(dsl::rack.select(dsl::uuid).get_result::(tx)?) + Ok(dsl::rack.select(dsl::uuid).get_result::(tx).optional()?) 
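        // `None` here simply means `initialize` has not yet stored a rack UUID.
        // The epoch-0 lifecycle these methods are building toward looks roughly
        // like this (a sketch; identifiers such as `conn`, `rack_uuid`, `sd`,
        // and `digest` are hypothetical, with `digest` the Sha3-256 of the
        // BCS-serialized `sd`):
        //
        //     db.initialize(&mut conn, &rack_uuid, sd.clone())?;   // prepare epoch 0
        //     db.initialize(&mut conn, &another_uuid, sd.clone())?; // ok: epoch 0 not yet committed
        //     db.commit_share(&mut conn, 0, digest)?;              // commit epoch 0
        //     db.initialize(&mut conn, &rack_uuid, sd)?;           // Err(AlreadyInitialized(..))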
} } @@ -237,6 +256,7 @@ mod tests { use super::*; use crate::trust_quorum::{RackSecret, ShareDistribution}; use omicron_test_utils::dev::test_setup_log; + use sha3::{Digest, Sha3_256}; // TODO: Fill in with actual member certs fn new_shares() -> Vec { diff --git a/bootstore/src/db/models.rs b/bootstore/src/db/models.rs index 3ef2ebd09c6..d75697cb521 100644 --- a/bootstore/src/db/models.rs +++ b/bootstore/src/db/models.rs @@ -8,11 +8,12 @@ use diesel::deserialize::FromSql; use diesel::prelude::*; use diesel::serialize::ToSql; use diesel::FromSqlRow; -use uuid::Uuid; +use sha3::{Digest, Sha3_256}; use super::macros::array_new_type; use super::macros::bcs_new_type; use super::schema::*; +use super::Error; use crate::trust_quorum::SerializableShareDistribution; bcs_new_type!(Share, SerializableShareDistribution); @@ -20,7 +21,8 @@ bcs_new_type!(Share, SerializableShareDistribution); /// When a [`KeyShareParepare`] message arrives it is stored in a [`KeyShare`] /// When a [`KeyShareCommit`] message arrives the `committed` field/column is /// set to true. -#[derive(Debug, Queryable, Insertable)] +#[derive(Debug, Queryable, Insertable, Identifiable, AsChangeset)] +#[diesel(primary_key(epoch))] pub struct KeyShare { pub epoch: i32, pub share: Share, @@ -28,6 +30,26 @@ pub struct KeyShare { pub committed: bool, } +impl KeyShare { + pub fn new( + epoch: i32, + share: SerializableShareDistribution, + ) -> Result { + // We save the digest so we don't have to deserialize and recompute most of the time. + // We'd only want to do that for a consistency check occasionally. + let val = bcs::to_bytes(&share)?; + let share_digest = + sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) + .into(); + Ok(KeyShare { + epoch, + share: Share(share), + share_digest, + committed: false, + }) + } +} + /// Information about the rack #[derive(Debug, Queryable, Insertable)] #[diesel(table_name = rack)] diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index 193474cb9bd..6d758fd80de 100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -83,7 +83,8 @@ pub enum NodeOpResult { /// A key share for a given epoch as requested by [`PeerRequest::GetShare`] Share { epoch: i32, share: Share }, - /// An ack for the most recent coordinator message + /// An ack for the most recent coordinator message, where there is no data + /// to return CoordinatorAck, } diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 8d1e53c1b5f..e455d2cde78 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -111,7 +111,8 @@ impl Node { rack_uuid: Uuid, share_distribution: SerializableShareDistribution, ) -> Result { - unimplemented!(); + self.db.initialize(&mut self.conn, &rack_uuid, share_distribution)?; + Ok(NodeOpResult::CoordinatorAck) } fn handle_key_share_prepare( From 735d902aac6d4294480368b47d87598d01061c3b Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Wed, 7 Sep 2022 22:03:44 +0000 Subject: [PATCH 05/16] Add test for rack initialization --- bootstore/src/db/mod.rs | 11 ++++--- bootstore/src/db/models.rs | 6 ++++ bootstore/src/messages.rs | 1 + bootstore/src/node.rs | 60 ++++++++++++++++++++++++++++++++++---- 4 files changed, 68 insertions(+), 10 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 54bf2e5921d..e6c88cef988 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -6,7 +6,7 @@ mod error; mod macros; -mod models; +pub(crate) mod models; mod schema; use diesel::connection::SimpleConnection; @@ -85,7 +85,10 @@ impl Db { epoch: i32, share: SerializableShareDistribution, ) -> Result<(), Error> { - info!(self.log, "Writing key share prepare for {epoch} to the Db"); + info!( + self.log, + "Writing key share prepare for epoch {epoch} to the Db" + ); use schema::key_shares::dsl; let prepare = KeyShare::new(epoch, share)?; diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; @@ -252,14 +255,14 @@ impl Db { } #[cfg(test)] -mod tests { +pub mod tests { use super::*; use crate::trust_quorum::{RackSecret, ShareDistribution}; use omicron_test_utils::dev::test_setup_log; use sha3::{Digest, Sha3_256}; // TODO: Fill in with actual member certs - fn new_shares() -> Vec { + pub fn new_shares() -> Vec { let member_device_id_certs = vec![]; let rack_secret_threshold = 3; let total_shares = 5; diff --git a/bootstore/src/db/models.rs b/bootstore/src/db/models.rs index d75697cb521..6e75198d819 100644 --- a/bootstore/src/db/models.rs +++ b/bootstore/src/db/models.rs @@ -101,3 +101,9 @@ impl From for Sha3_256Digest { Sha3_256Digest(digest.0) } } + +impl From for sprockets_common::Sha3_256Digest { + fn from(digest: Sha3_256Digest) -> Self { + sprockets_common::Sha3_256Digest(digest.0) + } +} diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index 6d758fd80de..493fca7ffe2 100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -113,6 +113,7 @@ pub enum NodeError { )] MissingKeySharePrepare { rack_uuid: Uuid, epoch: i32 }, + //TODO: Should probably pull out the variants for better matching #[error("DB error: {0}")] Db(String), } diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index e455d2cde78..bd96b0ec0e7 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -9,7 +9,7 @@ use diesel::SqliteConnection; use slog::Logger; -use sprockets_host::Ed25519Certificate; +//use sprockets_host::Ed25519Certificate; use uuid::Uuid; use crate::db; @@ -22,8 +22,8 @@ pub struct Config { log: Logger, db_path: String, // TODO: This will live inside the certificate eventually - _serial_number: String, - _device_id_cert: Ed25519Certificate, + //_serial_number: String, + //_device_id_cert: Ed25519Certificate, } /// A node of the bootstore @@ -69,7 +69,7 @@ impl Node { let result = match req.op { NodeOp::GetShare { epoch } => self.handle_get_share(epoch), NodeOp::Initialize { rack_uuid, share_distribution } => { - self.handle_initialize(rack_uuid, share_distribution) + self.handle_initialize(&rack_uuid, share_distribution) } NodeOp::KeySharePrepare { rack_uuid, @@ -108,10 +108,10 @@ impl Node { fn handle_initialize( &mut self, - rack_uuid: Uuid, + rack_uuid: &Uuid, share_distribution: SerializableShareDistribution, ) -> Result { - self.db.initialize(&mut self.conn, &rack_uuid, share_distribution)?; + self.db.initialize(&mut self.conn, rack_uuid, share_distribution)?; Ok(NodeOpResult::CoordinatorAck) } @@ -132,3 +132,51 @@ impl Node { 
unimplemented!(); } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::models::KeyShare; + use crate::db::tests::new_shares; + use omicron_test_utils::dev::test_setup_log; + use uuid::Uuid; + + #[test] + fn initialize_can_run_again_if_epoch_0_is_not_committed() { + let logctx = test_setup_log("test_db"); + let config = + Config { log: logctx.log.clone(), db_path: ":memory:".to_string() }; + let mut node = Node::new(config); + let shares = new_shares(); + let expected = Ok(NodeOpResult::CoordinatorAck); + assert_eq!( + expected, + node.handle_initialize(&Uuid::new_v4(), shares[0].clone().into()) + ); + // We can re-initialize with a new uuid + let rack_uuid = Uuid::new_v4(); + assert_eq!( + expected, + node.handle_initialize(&rack_uuid, shares[0].clone().into()) + ); + + // We can re-initialize with a the same uuid + assert_eq!( + expected, + node.handle_initialize(&rack_uuid, shares[0].clone().into()) + ); + + // Committing the key share for epoch 0 means we cannot initialize again + let epoch = 0; + let prepare = KeyShare::new(epoch, shares[0].clone().into()).unwrap(); + node.db + .commit_share(&mut node.conn, epoch, prepare.share_digest.into()) + .unwrap(); + + assert!(node + .handle_initialize(&rack_uuid, shares[0].clone().into()) + .is_err()); + + logctx.cleanup_successful(); + } +} From 16bc0512fc98b0918fa72f76d772485d71ce894f Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Wed, 7 Sep 2022 23:35:25 +0000 Subject: [PATCH 06/16] Implement KeySharePrepare handling --- bootstore/src/db/error.rs | 15 ++++++ bootstore/src/db/macros.rs | 8 ++- bootstore/src/db/mod.rs | 100 +++++++++++++++++++++++++++++++------ bootstore/src/db/models.rs | 4 +- bootstore/src/messages.rs | 6 +++ bootstore/src/node.rs | 25 ++++++++-- 6 files changed, 135 insertions(+), 23 deletions(-) diff --git a/bootstore/src/db/error.rs b/bootstore/src/db/error.rs index 7d365e27af4..f845fb16001 100644 --- a/bootstore/src/db/error.rs +++ b/bootstore/src/db/error.rs @@ -30,4 +30,19 @@ pub enum Error { #[error("Already initialized with rack uuid: {0}")] AlreadyInitialized(String), + + #[error( + "Tried to Prepare a KeyShare with epoch {epoch}, but found one + with later epoch {stored_epoch}" + )] + OldKeySharePrepare { epoch: i32, stored_epoch: i32 }, + + #[error("A distinct key share already exists for epoch {epoch}")] + KeyShareAlreadyExists { epoch: i32 }, + + #[error("Rack UUID mismatch: Expected: {expected}, Actual: {actual}")] + RackUuidMismatch { expected: String, actual: String }, + + #[error("Rack not yet initialized")] + RackNotInitialized, } diff --git a/bootstore/src/db/macros.rs b/bootstore/src/db/macros.rs index 3cb7cb48221..e471f1589a4 100644 --- a/bootstore/src/db/macros.rs +++ b/bootstore/src/db/macros.rs @@ -4,12 +4,16 @@ //! Macros used to create ToSql and FromSql impls required by Diesel -/// Shamelessly stolen from buildomat/common/src/db.rs +/// Shamelessly cribbed and mutated from buildomat/common/src/db.rs /// Thanks @jmc macro_rules! 
bcs_new_type { ($name:ident, $mytype:ty) => { #[derive( - Clone, Debug, FromSqlRow, diesel::expression::AsExpression, + Clone, + PartialEq, + Debug, + FromSqlRow, + diesel::expression::AsExpression, )] #[diesel(sql_type = diesel::sql_types::Binary)] pub struct $name(pub $mytype); diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index e6c88cef988..d0911794cee 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -79,20 +79,75 @@ impl Db { .map_err(|err| Error::DbOpen { path: self.path.clone(), err }) } + /// Write an uncommitted `KeyShare` into the database. + /// + /// This command is idempotent. + /// + /// The rules for inserting a KeyShare are: + /// 1. A KeyShare for the given epoch does not exist unless it is identical + /// 2. A KeyShare for a later epoch does not exist + /// + /// Calling this method with an epoch of 0 is a programmer error so we + /// assert. pub fn prepare_share( &self, - tx: &mut SqliteConnection, + conn: &mut SqliteConnection, + rack_uuid: &Uuid, epoch: i32, - share: SerializableShareDistribution, + share_distribution: SerializableShareDistribution, ) -> Result<(), Error> { - info!( - self.log, - "Writing key share prepare for epoch {epoch} to the Db" - ); + assert_ne!(0, epoch); use schema::key_shares::dsl; - let prepare = KeyShare::new(epoch, share)?; - diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; - Ok(()) + let prepare = KeyShare::new(epoch, share_distribution)?; + let rack_uuid = rack_uuid.to_string(); + conn.immediate_transaction(|tx| { + // Has the rack been initialized? + if !self.is_initialized(tx)? { + return Err(Error::RackNotInitialized); + } + + // Is the rack id valid? + // If the rack is initialized, a rack uuid must exist, so unwrap is safe. + let stored_rack_uuid = self.get_rack_uuid(tx)?.unwrap(); + if rack_uuid != stored_rack_uuid { + return Err(Error::RackUuidMismatch { + expected: rack_uuid, + actual: stored_rack_uuid, + }); + } + + // Check for idempotence + if let Some(stored_key_share) = dsl::key_shares + .filter(dsl::epoch.eq(epoch)) + .get_result::(tx) + .optional()? + { + if prepare == stored_key_share { + return Ok(()); + } else { + return Err(Error::KeyShareAlreadyExists { epoch }); + } + } + + // We don't allow shares for old epochs + if let Some(stored_epoch) = dsl::key_shares + .select(dsl::epoch) + .filter(dsl::epoch.gt(epoch)) + .get_result::(tx) + .optional()? + { + return Err(Error::OldKeySharePrepare { epoch, stored_epoch }); + } + + info!( + self.log, + "Writing key share prepare for epoch {epoch} to the Db" + ); + diesel::insert_into(dsl::key_shares) + .values(&prepare) + .execute(tx)?; + Ok(()) + }) } pub fn commit_share( @@ -142,13 +197,28 @@ impl Db { }, None => { info!(self.log, "Initializing Rack for the first time with UUID: {new_uuid}"); - self.prepare_share(tx, 0, share_distribution) + self.insert_prepare(tx, 0, share_distribution) } } }) } + // Insert an uncommitted key share for a given epoch This should be called + // inside a transaction by the caller so that the given checks can be + // performed. + fn insert_prepare( + &self, + tx: &mut SqliteConnection, + epoch: i32, + share_distribution: SerializableShareDistribution, + ) -> Result<(), Error> { + use schema::key_shares::dsl; + let prepare = KeyShare::new(epoch, share_distribution)?; + diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; + Ok(()) + } + // During rack initialization we set the rack UUID. 
// // We only allow rewriting this UUID in when the rack is not initialized, @@ -174,7 +244,7 @@ impl Db { // Overwrite a share for epoch 0 during rack initialization // - // This method should only be called from dinitialize. + // This method should only be called from initialize. fn update_prepare_for_epoch_0( &self, tx: &mut SqliteConnection, @@ -232,8 +302,8 @@ impl Db { /// Return true if the persisted rack uuid matches `uuid`, false otherwise pub fn has_valid_rack_uuid( &self, - uuid: &str, tx: &mut SqliteConnection, + uuid: &str, ) -> Result { use schema::rack::dsl; @@ -289,7 +359,7 @@ pub mod tests { let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.prepare_share(&mut conn, epoch, expected.clone()).unwrap(); + db.insert_prepare(&mut conn, epoch, expected.clone()).unwrap(); let (share, committed) = dsl::key_shares .select((dsl::share, dsl::committed)) .filter(dsl::epoch.eq(epoch)) @@ -318,7 +388,7 @@ pub mod tests { let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.prepare_share(&mut conn, epoch, expected.clone()).unwrap(); + db.insert_prepare(&mut conn, epoch, expected.clone()).unwrap(); let digest = sprockets_common::Sha3_256Digest::default(); let err = db.commit_share(&mut conn, epoch, digest).unwrap_err(); assert!(matches!(err, Error::CommitHashMismatch { epoch: _ })); @@ -332,7 +402,7 @@ pub mod tests { let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.prepare_share(&mut conn, epoch, expected.clone()).unwrap(); + db.insert_prepare(&mut conn, epoch, expected.clone()).unwrap(); let val = bcs::to_bytes(&expected).unwrap(); let digest = diff --git a/bootstore/src/db/models.rs b/bootstore/src/db/models.rs index 6e75198d819..cd0888942a7 100644 --- a/bootstore/src/db/models.rs +++ b/bootstore/src/db/models.rs @@ -21,7 +21,9 @@ bcs_new_type!(Share, SerializableShareDistribution); /// When a [`KeyShareParepare`] message arrives it is stored in a [`KeyShare`] /// When a [`KeyShareCommit`] message arrives the `committed` field/column is /// set to true. -#[derive(Debug, Queryable, Insertable, Identifiable, AsChangeset)] +#[derive( + Debug, PartialEq, Queryable, Insertable, Identifiable, AsChangeset, +)] #[diesel(primary_key(epoch))] pub struct KeyShare { pub epoch: i32, diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index 493fca7ffe2..7df59211fbc 100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -116,6 +116,12 @@ pub enum NodeError { //TODO: Should probably pull out the variants for better matching #[error("DB error: {0}")] Db(String), + + #[error( + "'KeySharePrepare' messages are not allowed for epoch 0. 
+Please send an 'Initialize' message" + )] + KeySharePrepareForEpoch0, } impl From for NodeError { diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index bd96b0ec0e7..2204dea22f4 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -76,7 +76,7 @@ impl Node { epoch, share_distribution, } => self.handle_key_share_prepare( - rack_uuid, + &rack_uuid, epoch, share_distribution, ), @@ -88,6 +88,7 @@ impl Node { NodeResponse { version: req.version, id: req.id, result } } + // Handle `GetShare` messages from another node fn handle_get_share( &mut self, epoch: i32, @@ -106,6 +107,7 @@ impl Node { } } + // Handle `Initialize` messages from the coordinator fn handle_initialize( &mut self, rack_uuid: &Uuid, @@ -115,15 +117,28 @@ impl Node { Ok(NodeOpResult::CoordinatorAck) } + // Handle `KeySharePrepare` messages from the coordinator fn handle_key_share_prepare( &mut self, - _rack_uuid: Uuid, - _epoch: i32, - _share_distribution: SerializableShareDistribution, + rack_uuid: &Uuid, + epoch: i32, + share_distribution: SerializableShareDistribution, ) -> Result { - unimplemented!(); + if epoch == 0 { + return Err(NodeError::KeySharePrepareForEpoch0); + } + + self.db.prepare_share( + &mut self.conn, + rack_uuid, + epoch, + share_distribution, + )?; + + Ok(NodeOpResult::CoordinatorAck) } + // Handle `KeyShareCommit` messages from the coordinator fn handle_key_share_commit( &mut self, _rack_uuid: Uuid, From 317184335bf5dece438da2c694dd4fc7d70becad Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Thu, 8 Sep 2022 00:00:12 +0000 Subject: [PATCH 07/16] Better error wrapping --- bootstore/src/db/error.rs | 31 +++++++++++++++++++++++-------- bootstore/src/db/mod.rs | 22 ++++++++++++++-------- bootstore/src/messages.rs | 11 ++--------- bootstore/src/node.rs | 15 ++------------- 4 files changed, 41 insertions(+), 38 deletions(-) diff --git a/bootstore/src/db/error.rs b/bootstore/src/db/error.rs index f845fb16001..4b75189b39e 100644 --- a/bootstore/src/db/error.rs +++ b/bootstore/src/db/error.rs @@ -4,18 +4,18 @@ //! 
DB related errors -use diesel::result::ConnectionError; +use serde::{Deserialize, Serialize}; -#[derive(thiserror::Error, Debug)] +#[derive(thiserror::Error, Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum Error { #[error("Failed to open db connection to {path}: {err}")] - DbOpen { path: String, err: ConnectionError }, + DbOpen { path: String, err: String }, - #[error(transparent)] - Db(#[from] diesel::result::Error), + #[error("Diesel/sqlite error: {err}")] + Diesel { err: String }, - #[error(transparent)] - Bcs(#[from] bcs::Error), + #[error("BCS serialization error: {err}")] + Bcs { err: String }, // Temporary until the using code is written #[allow(dead_code)] @@ -43,6 +43,21 @@ pub enum Error { #[error("Rack UUID mismatch: Expected: {expected}, Actual: {actual}")] RackUuidMismatch { expected: String, actual: String }, - #[error("Rack not yet initialized")] + #[error("Rack not initialized")] RackNotInitialized, + + #[error("Key share for epoch {epoch} not committed")] + KeyShareNotCommitted { epoch: i32 }, +} + +impl From for Error { + fn from(err: diesel::result::Error) -> Self { + Error::Diesel { err: err.to_string() } + } +} + +impl From for Error { + fn from(err: bcs::Error) -> Self { + Error::Bcs { err: err.to_string() } + } } diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index d0911794cee..d94a908a2a0 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -46,8 +46,9 @@ impl Db { "component" => "BootstoreDb" )); info!(log, "opening database {:?}", path); - let mut c = SqliteConnection::establish(path) - .map_err(|err| Error::DbOpen { path: path.into(), err })?; + let mut c = SqliteConnection::establish(path).map_err(|err| { + Error::DbOpen { path: path.into(), err: err.to_string() } + })?; // Enable foreign key processing, which is off by default. Without // enabling this, there is no referential integrity check between @@ -75,8 +76,10 @@ impl Db { self.log, "Creating a new connection to database {:?}", self.path ); - SqliteConnection::establish(&self.path) - .map_err(|err| Error::DbOpen { path: self.path.clone(), err }) + SqliteConnection::establish(&self.path).map_err(|err| Error::DbOpen { + path: self.path.clone(), + err: err.to_string(), + }) } /// Write an uncommitted `KeyShare` into the database. 
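Flattening the diesel and bcs errors into `String`s is what allows `db::Error`
to derive `Clone`, `PartialEq`, `Serialize`, and `Deserialize`, so it can be
carried inside `NodeError::Db` and compared structurally. A small sketch of
what that buys in a test (the epoch value and the connection are hypothetical):

    let err = db.get_committed_share(&mut conn, 7).unwrap_err();
    assert_eq!(err, Error::KeyShareNotCommitted { epoch: 7 });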
@@ -279,9 +282,13 @@ impl Db { .select(dsl::share) .filter(dsl::epoch.eq(epoch)) .filter(dsl::committed.eq(true)) - .get_result::(conn)?; + .get_result::(conn) + .optional()?; - Ok(share) + match share { + Some(share) => Ok(share), + None => Err(Error::KeyShareNotCommitted { epoch }), + } } /// Return true if there is a commit for epoch 0, false otherwise @@ -376,8 +383,7 @@ pub mod tests { let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); let epoch = 0; let digest = sprockets_common::Sha3_256Digest::default(); - let err = db.commit_share(&mut conn, epoch, digest).unwrap_err(); - assert!(matches!(err, Error::Db(diesel::result::Error::NotFound))); + db.commit_share(&mut conn, epoch, digest).unwrap_err(); logctx.cleanup_successful(); } diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index 7df59211fbc..bb1fab13dfa 100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -90,7 +90,7 @@ pub enum NodeOpResult { /// Errors returned inside a [`NodeOpResult`] #[derive( - Debug, Clone, PartialEq, From, Serialize, Deserialize, thiserror::Error, + Debug, PartialEq, Clone, From, Serialize, Deserialize, thiserror::Error, )] pub enum NodeError { #[error("Version {0} messages are unsupported.")] @@ -113,9 +113,8 @@ pub enum NodeError { )] MissingKeySharePrepare { rack_uuid: Uuid, epoch: i32 }, - //TODO: Should probably pull out the variants for better matching #[error("DB error: {0}")] - Db(String), + Db(db::Error), #[error( "'KeySharePrepare' messages are not allowed for epoch 0. @@ -123,9 +122,3 @@ Please send an 'Initialize' message" )] KeySharePrepareForEpoch0, } - -impl From for NodeError { - fn from(err: db::Error) -> Self { - NodeError::Db(err.to_string()) - } -} diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 2204dea22f4..ba688e71738 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -12,7 +12,6 @@ use slog::Logger; //use sprockets_host::Ed25519Certificate; use uuid::Uuid; -use crate::db; use crate::db::Db; use crate::messages::*; use crate::trust_quorum::SerializableShareDistribution; @@ -93,18 +92,8 @@ impl Node { &mut self, epoch: i32, ) -> Result { - match self.db.get_committed_share(&mut self.conn, epoch) { - Ok(share) => { - Ok(NodeOpResult::Share { epoch, share: share.0.share }) - } - Err(err) => { - if let db::Error::Db(diesel::result::Error::NotFound) = err { - Err(NodeError::KeyShareDoesNotExist { epoch }) - } else { - Err(err.into()) - } - } - } + let share = self.db.get_committed_share(&mut self.conn, epoch)?; + Ok(NodeOpResult::Share { epoch, share: share.0.share }) } // Handle `Initialize` messages from the coordinator From 9de324928f609229b797d26d1a6035969a411d9d Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Thu, 8 Sep 2022 00:05:42 +0000 Subject: [PATCH 08/16] more specific error handling in node test --- bootstore/src/node.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index ba688e71738..100901f5dad 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -177,9 +177,13 @@ mod tests { .commit_share(&mut node.conn, epoch, prepare.share_digest.into()) .unwrap(); - assert!(node - .handle_initialize(&rack_uuid, shares[0].clone().into()) - .is_err()); + let expected = Err(NodeError::Db( + crate::db::Error::AlreadyInitialized(rack_uuid.to_string()), + )); + assert_eq!( + expected, + node.handle_initialize(&rack_uuid, shares[0].clone().into()) + ); logctx.cleanup_successful(); } From 3ea0e6eb0df50409908acc2e8c4a3d09d12016cf Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Thu, 8 Sep 2022 05:20:06 +0000 Subject: [PATCH 09/16] key_share_commit implemnented and positive test for reconfiguration --- bootstore/src/db/error.rs | 4 +- bootstore/src/db/mod.rs | 60 +++++++++++---------- bootstore/src/db/models.rs | 18 ++++--- bootstore/src/messages.rs | 6 ++- bootstore/src/node.rs | 104 +++++++++++++++++++++++++++++-------- test-utils/src/dev/mod.rs | 2 +- 6 files changed, 130 insertions(+), 64 deletions(-) diff --git a/bootstore/src/db/error.rs b/bootstore/src/db/error.rs index 4b75189b39e..affbdf76ccb 100644 --- a/bootstore/src/db/error.rs +++ b/bootstore/src/db/error.rs @@ -40,8 +40,8 @@ pub enum Error { #[error("A distinct key share already exists for epoch {epoch}")] KeyShareAlreadyExists { epoch: i32 }, - #[error("Rack UUID mismatch: Expected: {expected}, Actual: {actual}")] - RackUuidMismatch { expected: String, actual: String }, + #[error("Rack UUID mismatch: Expected: {expected}, Actual: {actual:?}")] + RackUuidMismatch { expected: String, actual: Option }, #[error("Rack not initialized")] RackNotInitialized, diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index d94a908a2a0..9e0531d10dc 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -19,7 +19,6 @@ use uuid::Uuid; use crate::trust_quorum::SerializableShareDistribution; pub(crate) use error::Error; use models::KeyShare; -use models::Rack; use models::Sha3_256Digest; use models::Share; @@ -102,22 +101,14 @@ impl Db { assert_ne!(0, epoch); use schema::key_shares::dsl; let prepare = KeyShare::new(epoch, share_distribution)?; - let rack_uuid = rack_uuid.to_string(); conn.immediate_transaction(|tx| { // Has the rack been initialized? if !self.is_initialized(tx)? { return Err(Error::RackNotInitialized); } - // Is the rack id valid? - // If the rack is initialized, a rack uuid must exist, so unwrap is safe. - let stored_rack_uuid = self.get_rack_uuid(tx)?.unwrap(); - if rack_uuid != stored_rack_uuid { - return Err(Error::RackUuidMismatch { - expected: rack_uuid, - actual: stored_rack_uuid, - }); - } + // Does the rack_uuid match what's stored? + self.validate_rack_uuid(tx, rack_uuid)?; // Check for idempotence if let Some(stored_key_share) = dsl::key_shares @@ -156,11 +147,15 @@ impl Db { pub fn commit_share( &self, conn: &mut SqliteConnection, + rack_uuid: &Uuid, epoch: i32, digest: sprockets_common::Sha3_256Digest, ) -> Result<(), Error> { use schema::key_shares::dsl; conn.immediate_transaction(|tx| { + // Does the rack_uuid match what's stored? + self.validate_rack_uuid(tx, rack_uuid)?; + // We only want to commit if the share digest of the commit is the // same as that of the prepare. 
let prepare_digest = dsl::key_shares @@ -292,10 +287,7 @@ impl Db { } /// Return true if there is a commit for epoch 0, false otherwise - pub fn is_initialized( - &self, - tx: &mut SqliteConnection, - ) -> Result { + fn is_initialized(&self, tx: &mut SqliteConnection) -> Result { use schema::key_shares::dsl; Ok(dsl::key_shares .select(dsl::epoch) @@ -306,19 +298,22 @@ impl Db { .is_some()) } - /// Return true if the persisted rack uuid matches `uuid`, false otherwise - pub fn has_valid_rack_uuid( + /// Return `Ok(())` if the persisted rack uuid matches `uuid`, or + /// `Err(Error::RackUuidMismatch{..})` otherwise. + fn validate_rack_uuid( &self, tx: &mut SqliteConnection, - uuid: &str, - ) -> Result { - use schema::rack::dsl; - - Ok(dsl::rack - .filter(dsl::uuid.eq(uuid)) - .get_result::(tx) - .optional()? - .is_some()) + rack_uuid: &Uuid, + ) -> Result<(), Error> { + let rack_uuid = rack_uuid.to_string(); + let stored_rack_uuid = self.get_rack_uuid(tx)?; + if Some(&rack_uuid) != stored_rack_uuid.as_ref() { + return Err(Error::RackUuidMismatch { + expected: rack_uuid, + actual: stored_rack_uuid, + }); + } + Ok(()) } pub fn get_rack_uuid( @@ -383,7 +378,7 @@ pub mod tests { let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); let epoch = 0; let digest = sprockets_common::Sha3_256Digest::default(); - db.commit_share(&mut conn, epoch, digest).unwrap_err(); + db.commit_share(&mut conn, &Uuid::new_v4(), epoch, digest).unwrap_err(); logctx.cleanup_successful(); } @@ -394,9 +389,11 @@ pub mod tests { let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.insert_prepare(&mut conn, epoch, expected.clone()).unwrap(); + let rack_uuid = Uuid::new_v4(); + db.initialize(&mut conn, &rack_uuid, expected.clone()).unwrap(); let digest = sprockets_common::Sha3_256Digest::default(); - let err = db.commit_share(&mut conn, epoch, digest).unwrap_err(); + let err = + db.commit_share(&mut conn, &rack_uuid, epoch, digest).unwrap_err(); assert!(matches!(err, Error::CommitHashMismatch { epoch: _ })); logctx.cleanup_successful(); } @@ -408,13 +405,14 @@ pub mod tests { let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.insert_prepare(&mut conn, epoch, expected.clone()).unwrap(); + let rack_uuid = Uuid::new_v4(); + db.initialize(&mut conn, &rack_uuid, expected.clone()).unwrap(); let val = bcs::to_bytes(&expected).unwrap(); let digest = sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) .into(); - assert!(db.commit_share(&mut conn, epoch, digest).is_ok()); + assert!(db.commit_share(&mut conn, &rack_uuid, epoch, digest).is_ok()); // Ensure `committed = true` use schema::key_shares::dsl; diff --git a/bootstore/src/db/models.rs b/bootstore/src/db/models.rs index cd0888942a7..e8a7a966909 100644 --- a/bootstore/src/db/models.rs +++ b/bootstore/src/db/models.rs @@ -35,21 +35,27 @@ pub struct KeyShare { impl KeyShare { pub fn new( epoch: i32, - share: SerializableShareDistribution, + share_distribution: SerializableShareDistribution, ) -> Result { // We save the digest so we don't have to deserialize and recompute most of the time. // We'd only want to do that for a consistency check occasionally. 
- let val = bcs::to_bytes(&share)?; let share_digest = - sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) - .into(); + Self::share_distribution_digest(&share_distribution)?; Ok(KeyShare { epoch, - share: Share(share), + share: Share(share_distribution), share_digest, committed: false, }) } + + pub fn share_distribution_digest( + sd: &SerializableShareDistribution, + ) -> Result { + let val = bcs::to_bytes(&sd)?; + Ok(sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) + .into()) + } } /// Information about the rack @@ -63,7 +69,7 @@ pub struct Rack { /// derived from the rack secret for the given epoch with the given salt /// /// The epoch informs which rack secret should be used to derive the -/// encryptiong key used to encrypt this root secret. +/// encryption key used to encrypt this root secret. /// /// TODO-security: We probably don't want to log even the encrypted secret, but /// it's likely useful for debugging right now. diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index bb1fab13dfa..65b61dcce4a 100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -65,7 +65,11 @@ pub enum NodeOp { /// A request from a [`Coordinator`] for the Commit phase of a /// rekey or reconfiguration - KeyShareCommit { rack_uuid: Uuid, epoch: i32 }, + KeyShareCommit { + rack_uuid: Uuid, + epoch: i32, + prepare_share_distribution_digest: sprockets_common::Sha3_256Digest, + }, } /// A response from a [`Node`] to another [`Node`] or a [`Coordinator`] diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 100901f5dad..3b2b3d2b8cd 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -9,6 +9,7 @@ use diesel::SqliteConnection; use slog::Logger; +use sprockets_common::Sha3_256Digest; //use sprockets_host::Ed25519Certificate; use uuid::Uuid; @@ -79,9 +80,15 @@ impl Node { epoch, share_distribution, ), - NodeOp::KeyShareCommit { rack_uuid, epoch } => { - self.handle_key_share_commit(rack_uuid, epoch) - } + NodeOp::KeyShareCommit { + rack_uuid, + epoch, + prepare_share_distribution_digest, + } => self.handle_key_share_commit( + &rack_uuid, + epoch, + prepare_share_distribution_digest, + ), }; NodeResponse { version: req.version, id: req.id, result } @@ -130,10 +137,18 @@ impl Node { // Handle `KeyShareCommit` messages from the coordinator fn handle_key_share_commit( &mut self, - _rack_uuid: Uuid, - _epoch: i32, + rack_uuid: &Uuid, + epoch: i32, + prepare_shared_distribution_digest: Sha3_256Digest, ) -> Result { - unimplemented!(); + self.db.commit_share( + &mut self.conn, + rack_uuid, + epoch, + prepare_shared_distribution_digest, + )?; + + Ok(NodeOpResult::CoordinatorAck) } } @@ -142,47 +157,90 @@ mod tests { use super::*; use crate::db::models::KeyShare; use crate::db::tests::new_shares; + use crate::trust_quorum::ShareDistribution; use omicron_test_utils::dev::test_setup_log; + use omicron_test_utils::dev::LogContext; use uuid::Uuid; - #[test] - fn initialize_can_run_again_if_epoch_0_is_not_committed() { + fn setup() -> (LogContext, Node, Vec) { let logctx = test_setup_log("test_db"); let config = Config { log: logctx.log.clone(), db_path: ":memory:".to_string() }; - let mut node = Node::new(config); - let shares = new_shares(); + let node = Node::new(config); + (logctx, node, new_shares()) + } + + #[test] + fn initialize_can_run_again_if_epoch_0_is_not_committed() { + let (logctx, mut node, share_distributions) = setup(); + let sd: SerializableShareDistribution = + share_distributions[0].clone().into(); let expected = 
Ok(NodeOpResult::CoordinatorAck); assert_eq!( expected, - node.handle_initialize(&Uuid::new_v4(), shares[0].clone().into()) + node.handle_initialize(&Uuid::new_v4(), sd.clone()) ); // We can re-initialize with a new uuid let rack_uuid = Uuid::new_v4(); - assert_eq!( - expected, - node.handle_initialize(&rack_uuid, shares[0].clone().into()) - ); + assert_eq!(expected, node.handle_initialize(&rack_uuid, sd.clone())); // We can re-initialize with a the same uuid - assert_eq!( - expected, - node.handle_initialize(&rack_uuid, shares[0].clone().into()) - ); + assert_eq!(expected, node.handle_initialize(&rack_uuid, sd.clone())); // Committing the key share for epoch 0 means we cannot initialize again let epoch = 0; - let prepare = KeyShare::new(epoch, shares[0].clone().into()).unwrap(); + let prepare = KeyShare::new(epoch, sd.clone()).unwrap(); node.db - .commit_share(&mut node.conn, epoch, prepare.share_digest.into()) + .commit_share( + &mut node.conn, + &rack_uuid, + epoch, + prepare.share_digest.into(), + ) .unwrap(); let expected = Err(NodeError::Db( crate::db::Error::AlreadyInitialized(rack_uuid.to_string()), )); + assert_eq!(expected, node.handle_initialize(&rack_uuid, sd)); + + logctx.cleanup_successful(); + } + + #[test] + fn initialize_and_reconfigure() { + let (logctx, mut node, share_distributions) = setup(); + let sd: SerializableShareDistribution = + share_distributions[0].clone().into(); + let rack_uuid = Uuid::new_v4(); + let ok_ack = Ok(NodeOpResult::CoordinatorAck); + let sd_digest = + KeyShare::share_distribution_digest(&sd).unwrap().into(); + let epoch = 0; + + // Successful rack initialization - The initialize request is the prepare + // Then we commit it. In the future, we will also prepare and commit a plan + // as well as a share distribution. In that case we will likely have a separate + // `InitializeCommit` message and will likely change the `Initialize` message to + // `InitializePrepare`. + assert_eq!(ok_ack, node.handle_initialize(&rack_uuid, sd)); assert_eq!( - expected, - node.handle_initialize(&rack_uuid, shares[0].clone().into()) + ok_ack, + node.handle_key_share_commit(&rack_uuid, epoch, sd_digest) + ); + + // Let's simulate a successful reconfiguration + let sd: SerializableShareDistribution = new_shares()[0].clone().into(); + let sd_digest = + KeyShare::share_distribution_digest(&sd).unwrap().into(); + let epoch = 1; + assert_eq!( + ok_ack, + node.handle_key_share_prepare(&rack_uuid, epoch, sd) + ); + assert_eq!( + ok_ack, + node.handle_key_share_commit(&rack_uuid, epoch, sd_digest) ); logctx.cleanup_successful(); diff --git a/test-utils/src/dev/mod.rs b/test-utils/src/dev/mod.rs index 45954cd4942..1e6c7e80c44 100644 --- a/test-utils/src/dev/mod.rs +++ b/test-utils/src/dev/mod.rs @@ -11,7 +11,7 @@ pub mod poll; pub mod test_cmds; use anyhow::Context; -use dropshot::test_util::LogContext; +pub use dropshot::test_util::LogContext; use dropshot::ConfigLogging; use dropshot::ConfigLoggingIfExists; use dropshot::ConfigLoggingLevel; From 7e139c206e0eeadea447879eaaff7079a3736ec6 Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Thu, 8 Sep 2022 05:39:25 +0000 Subject: [PATCH 10/16] Add some negative tests --- bootstore/src/db/mod.rs | 2 ++ bootstore/src/node.rs | 67 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 66 insertions(+), 3 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 9e0531d10dc..cba946d801b 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -226,6 +226,8 @@ impl Db { // and insert the new one. // // Return the old rack UUID if one exists + // + // TODO: Add a DB trigger to ensure only 1 row can exist at a time? fn initialize_rack_uuid( &self, tx: &mut SqliteConnection, diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 3b2b3d2b8cd..8e0d7886d05 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -155,6 +155,7 @@ impl Node { #[cfg(test)] mod tests { use super::*; + use crate::db; use crate::db::models::KeyShare; use crate::db::tests::new_shares; use crate::trust_quorum::ShareDistribution; @@ -199,9 +200,9 @@ mod tests { ) .unwrap(); - let expected = Err(NodeError::Db( - crate::db::Error::AlreadyInitialized(rack_uuid.to_string()), - )); + let expected = Err(NodeError::Db(db::Error::AlreadyInitialized( + rack_uuid.to_string(), + ))); assert_eq!(expected, node.handle_initialize(&rack_uuid, sd)); logctx.cleanup_successful(); @@ -245,4 +246,64 @@ mod tests { logctx.cleanup_successful(); } + + #[test] + fn cannot_prepare_if_not_initialized() { + let (logctx, mut node, share_distributions) = setup(); + let sd: SerializableShareDistribution = + share_distributions[0].clone().into(); + let rack_uuid = Uuid::new_v4(); + let epoch = 1; + + // We don't have even a prepared key share at epoch 0 yet + assert_eq!( + Err(NodeError::Db(db::Error::RackNotInitialized)), + node.handle_key_share_prepare(&rack_uuid, epoch, sd.clone()) + ); + + // Create a prepare, but don't commit it + let ok_ack = Ok(NodeOpResult::CoordinatorAck); + assert_eq!(ok_ack, node.handle_initialize(&rack_uuid, sd.clone())); + + // Try (and fail) to issue a prepare for epoch 1 again. The actual contents of the + // share distribution doesn't matter. + assert_eq!( + Err(NodeError::Db(db::Error::RackNotInitialized)), + node.handle_key_share_prepare(&rack_uuid, epoch, sd.clone()) + ); + + logctx.cleanup_successful(); + } + + #[test] + fn cannot_prepare_with_bad_rack_uuid() { + let (logctx, mut node, share_distributions) = setup(); + let sd: SerializableShareDistribution = + share_distributions[0].clone().into(); + let rack_uuid = Uuid::new_v4(); + let ok_ack = Ok(NodeOpResult::CoordinatorAck); + let sd_digest = + KeyShare::share_distribution_digest(&sd).unwrap().into(); + let epoch = 0; + + // Successful rack initialization + assert_eq!(ok_ack, node.handle_initialize(&rack_uuid, sd.clone())); + assert_eq!( + ok_ack, + node.handle_key_share_commit(&rack_uuid, epoch, sd_digest) + ); + + // Attempt to prepare with an invalid rack uuid + let bad_uuid = Uuid::new_v4(); + let epoch = 1; + assert_eq!( + Err(NodeError::Db(db::Error::RackUuidMismatch { + expected: bad_uuid.to_string(), + actual: Some(rack_uuid.to_string()) + })), + node.handle_key_share_prepare(&bad_uuid, epoch, sd.clone()) + ); + + logctx.cleanup_successful(); + } } From b84d120eb96b3f21f0a104e3e7e95928b1b00acf Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Fri, 9 Sep 2022 17:47:39 +0000 Subject: [PATCH 11/16] Parse UUID strings in DB layer and some other small fixes --- bootstore/src/db/error.rs | 10 +++++++--- bootstore/src/db/mod.rs | 33 +++++++++++++++++++-------------- bootstore/src/messages.rs | 4 ++-- bootstore/src/node.rs | 11 +++++------ 4 files changed, 33 insertions(+), 25 deletions(-) diff --git a/bootstore/src/db/error.rs b/bootstore/src/db/error.rs index affbdf76ccb..34f2b76ae22 100644 --- a/bootstore/src/db/error.rs +++ b/bootstore/src/db/error.rs @@ -5,6 +5,7 @@ //! DB related errors use serde::{Deserialize, Serialize}; +use uuid::Uuid; #[derive(thiserror::Error, Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum Error { @@ -17,6 +18,9 @@ pub enum Error { #[error("BCS serialization error: {err}")] Bcs { err: String }, + #[error("Failed to parse string as UUID: {uuidstr} - {err}")] + ParseUuid { uuidstr: String, err: String }, + // Temporary until the using code is written #[allow(dead_code)] #[error("Share commit for {epoch} does not match prepare")] @@ -29,10 +33,10 @@ pub enum Error { DbInvariant(String), #[error("Already initialized with rack uuid: {0}")] - AlreadyInitialized(String), + AlreadyInitialized(Uuid), #[error( - "Tried to Prepare a KeyShare with epoch {epoch}, but found one + "Tried to Prepare a KeyShare with epoch {epoch}, but found one \ with later epoch {stored_epoch}" )] OldKeySharePrepare { epoch: i32, stored_epoch: i32 }, @@ -41,7 +45,7 @@ pub enum Error { KeyShareAlreadyExists { epoch: i32 }, #[error("Rack UUID mismatch: Expected: {expected}, Actual: {actual:?}")] - RackUuidMismatch { expected: String, actual: Option }, + RackUuidMismatch { expected: Uuid, actual: Option }, #[error("Rack not initialized")] RackNotInitialized, diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index cba946d801b..104f50222b4 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -186,15 +186,14 @@ impl Db { let uuid = self.get_rack_uuid(tx)?.unwrap(); return Err(Error::AlreadyInitialized(uuid)); } - let new_uuid = rack_uuid.to_string(); - match self.initialize_rack_uuid(tx, &new_uuid)? { + match self.initialize_rack_uuid(tx, &rack_uuid)? { Some(old_uuid) => { - info!(self.log, "Re-Initializing Rack: Old UUID: {old_uuid}, New UUID: {new_uuid}"); + info!(self.log, "Re-Initializing Rack: Old UUID: {old_uuid}, New UUID: {rack_uuid}"); self.update_prepare_for_epoch_0(tx, share_distribution) }, None => { - info!(self.log, "Initializing Rack for the first time with UUID: {new_uuid}"); + info!(self.log, "Initializing Rack for the first time with UUID: {rack_uuid}"); self.insert_prepare(tx, 0, share_distribution) } } @@ -202,7 +201,7 @@ impl Db { }) } - // Insert an uncommitted key share for a given epoch This should be called + // Insert an uncommitted key share for a given epoch. This should be called // inside a transaction by the caller so that the given checks can be // performed. fn insert_prepare( @@ -219,7 +218,7 @@ impl Db { // During rack initialization we set the rack UUID. // - // We only allow rewriting this UUID in when the rack is not initialized, + // We only allow rewriting this UUID when the rack is not initialized, // and so we only call this from the `initialize` method. 
// // Since we only allow a single row, we just delete the existing rows @@ -231,13 +230,13 @@ impl Db { fn initialize_rack_uuid( &self, tx: &mut SqliteConnection, - new_uuid: &str, - ) -> Result, Error> { + new_uuid: &Uuid, + ) -> Result, Error> { use schema::rack::dsl; let old_uuid = self.get_rack_uuid(tx)?; diesel::delete(dsl::rack).execute(tx)?; diesel::insert_into(dsl::rack) - .values(dsl::uuid.eq(new_uuid)) + .values(dsl::uuid.eq(new_uuid.to_string())) .execute(tx)?; Ok(old_uuid) } @@ -307,11 +306,10 @@ impl Db { tx: &mut SqliteConnection, rack_uuid: &Uuid, ) -> Result<(), Error> { - let rack_uuid = rack_uuid.to_string(); let stored_rack_uuid = self.get_rack_uuid(tx)?; - if Some(&rack_uuid) != stored_rack_uuid.as_ref() { + if Some(rack_uuid) != stored_rack_uuid.as_ref() { return Err(Error::RackUuidMismatch { - expected: rack_uuid, + expected: rack_uuid.clone(), actual: stored_rack_uuid, }); } @@ -321,10 +319,17 @@ impl Db { pub fn get_rack_uuid( &self, tx: &mut SqliteConnection, - ) -> Result, Error> { + ) -> Result, Error> { use schema::rack::dsl; - Ok(dsl::rack.select(dsl::uuid).get_result::(tx).optional()?) + dsl::rack.select(dsl::uuid).get_result::(tx).optional()?.map_or( + Ok(None), + |uuidstr| { + Uuid::parse_str(&uuidstr).map(|uuid| Some(uuid)).map_err( + |err| Error::ParseUuid { uuidstr, err: err.to_string() }, + ) + }, + ) } } diff --git a/bootstore/src/messages.rs b/bootstore/src/messages.rs index 65b61dcce4a..6006a557f80 100644 --- a/bootstore/src/messages.rs +++ b/bootstore/src/messages.rs @@ -112,7 +112,7 @@ pub enum NodeError { AlreadyInitialized { rack_uuid: Uuid }, #[error( - "No corresponding key share prepare for this commit: rack UUID: + "No corresponding key share prepare for this commit: rack UUID: \ {rack_uuid}, epoch: {epoch}" )] MissingKeySharePrepare { rack_uuid: Uuid, epoch: i32 }, @@ -121,7 +121,7 @@ pub enum NodeError { Db(db::Error), #[error( - "'KeySharePrepare' messages are not allowed for epoch 0. + "'KeySharePrepare' messages are not allowed for epoch 0. \ Please send an 'Initialize' message" )] KeySharePrepareForEpoch0, diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index 8e0d7886d05..e4abf9756a5 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -185,7 +185,7 @@ mod tests { let rack_uuid = Uuid::new_v4(); assert_eq!(expected, node.handle_initialize(&rack_uuid, sd.clone())); - // We can re-initialize with a the same uuid + // We can re-initialize with the same uuid assert_eq!(expected, node.handle_initialize(&rack_uuid, sd.clone())); // Committing the key share for epoch 0 means we cannot initialize again @@ -200,9 +200,8 @@ mod tests { ) .unwrap(); - let expected = Err(NodeError::Db(db::Error::AlreadyInitialized( - rack_uuid.to_string(), - ))); + let expected = + Err(NodeError::Db(db::Error::AlreadyInitialized(rack_uuid))); assert_eq!(expected, node.handle_initialize(&rack_uuid, sd)); logctx.cleanup_successful(); @@ -298,8 +297,8 @@ mod tests { let epoch = 1; assert_eq!( Err(NodeError::Db(db::Error::RackUuidMismatch { - expected: bad_uuid.to_string(), - actual: Some(rack_uuid.to_string()) + expected: bad_uuid, + actual: Some(rack_uuid) })), node.handle_key_share_prepare(&bad_uuid, epoch, sd.clone()) ); From 0b6f4d21cfeb928e3b5315201b542c8bc804bb42 Mon Sep 17 00:00:00 2001 From: "Andrew J. 
Stone" Date: Fri, 9 Sep 2022 18:56:24 +0000 Subject: [PATCH 12/16] clippy clean --- bootstore/src/db/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 104f50222b4..89e79ccbaab 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -65,7 +65,7 @@ impl Db { diesel::sql_query("PRAGMA synchronous = 'FULL'").execute(&mut c)?; // Create tables - c.batch_execute(&schema)?; + c.batch_execute(schema)?; Ok((Db { log, path: path.to_string() }, c)) } @@ -186,7 +186,7 @@ impl Db { let uuid = self.get_rack_uuid(tx)?.unwrap(); return Err(Error::AlreadyInitialized(uuid)); } - match self.initialize_rack_uuid(tx, &rack_uuid)? { + match self.initialize_rack_uuid(tx, rack_uuid)? { Some(old_uuid) => { info!(self.log, "Re-Initializing Rack: Old UUID: {old_uuid}, New UUID: {rack_uuid}"); self.update_prepare_for_epoch_0(tx, share_distribution) @@ -309,7 +309,7 @@ impl Db { let stored_rack_uuid = self.get_rack_uuid(tx)?; if Some(rack_uuid) != stored_rack_uuid.as_ref() { return Err(Error::RackUuidMismatch { - expected: rack_uuid.clone(), + expected: *rack_uuid, actual: stored_rack_uuid, }); } @@ -325,9 +325,9 @@ impl Db { dsl::rack.select(dsl::uuid).get_result::(tx).optional()?.map_or( Ok(None), |uuidstr| { - Uuid::parse_str(&uuidstr).map(|uuid| Some(uuid)).map_err( - |err| Error::ParseUuid { uuidstr, err: err.to_string() }, - ) + Uuid::parse_str(&uuidstr).map(Some).map_err(|err| { + Error::ParseUuid { uuidstr, err: err.to_string() } + }) }, ) } From fe3f84a2fe0c140e8b20631ebcacfe8ee0281fdd Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Fri, 9 Sep 2022 21:05:17 +0000 Subject: [PATCH 13/16] Db now contains the one true connection --- bootstore/src/db/mod.rs | 330 ++++++++++++++++++---------------------- bootstore/src/node.rs | 28 +--- 2 files changed, 157 insertions(+), 201 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 89e79ccbaab..68b85902096 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -25,9 +25,7 @@ use models::Share; pub struct Db { log: Logger, - // We use a String, because sqlite requires paths to be UTF-8, - // and strings guarantee that constraint. - path: String, + conn: SqliteConnection, } // Temporary until the using code is written @@ -36,10 +34,7 @@ impl Db { /// Initialize the database /// /// Create tables if they don't exist and set pragmas - pub fn init( - log: &Logger, - path: &str, - ) -> Result<(Db, SqliteConnection), Error> { + pub fn init(log: &Logger, path: &str) -> Result { let schema = include_str!("./schema.sql"); let log = log.new(o!( "component" => "BootstoreDb" @@ -67,17 +62,35 @@ impl Db { // Create tables c.batch_execute(schema)?; - Ok((Db { log, path: path.to_string() }, c)) + Ok(Db { log, conn: c }) } - pub fn new_connection(&self) -> Result { - info!( - self.log, - "Creating a new connection to database {:?}", self.path - ); - SqliteConnection::establish(&self.path).map_err(|err| Error::DbOpen { - path: self.path.clone(), - err: err.to_string(), + /// Write a KeyShare for epoch a along with the rack UUID + /// + /// The share must be committed with [`commit_share`] + pub fn initialize( + &mut self, + rack_uuid: &Uuid, + share_distribution: SerializableShareDistribution, + ) -> Result<(), Error> { + self.conn.immediate_transaction(|tx| { + if is_initialized(tx)? 
{ + // If the rack is initialized, a rack uuid must exist + let uuid = get_rack_uuid(tx)?.unwrap(); + return Err(Error::AlreadyInitialized(uuid)); + } + match initialize_rack_uuid(tx, rack_uuid)? { + Some(old_uuid) => { + info!(self.log, "Re-Initializing Rack: Old UUID: {old_uuid}, New UUID: {rack_uuid}"); + update_prepare_for_epoch_0(tx, share_distribution) + + }, + None => { + info!(self.log, "Initializing Rack for the first time with UUID: {rack_uuid}"); + insert_prepare(tx, 0, share_distribution) + } + } + }) } @@ -92,8 +105,7 @@ impl Db { /// Calling this method with an epoch of 0 is a programmer error so we /// assert. pub fn prepare_share( - &self, - conn: &mut SqliteConnection, + &mut self, rack_uuid: &Uuid, epoch: i32, share_distribution: SerializableShareDistribution, @@ -101,14 +113,14 @@ impl Db { assert_ne!(0, epoch); use schema::key_shares::dsl; let prepare = KeyShare::new(epoch, share_distribution)?; - conn.immediate_transaction(|tx| { + self.conn.immediate_transaction(|tx| { // Has the rack been initialized? - if !self.is_initialized(tx)? { + if !is_initialized(tx)? { return Err(Error::RackNotInitialized); } // Does the rack_uuid match what's stored? - self.validate_rack_uuid(tx, rack_uuid)?; + validate_rack_uuid(tx, rack_uuid)?; // Check for idempotence if let Some(stored_key_share) = dsl::key_shares @@ -145,16 +157,15 @@ impl Db { } pub fn commit_share( - &self, - conn: &mut SqliteConnection, + &mut self, rack_uuid: &Uuid, epoch: i32, digest: sprockets_common::Sha3_256Digest, ) -> Result<(), Error> { use schema::key_shares::dsl; - conn.immediate_transaction(|tx| { + self.conn.immediate_transaction(|tx| { // Does the rack_uuid match what's stored? - self.validate_rack_uuid(tx, rack_uuid)?; + validate_rack_uuid(tx, rack_uuid)?; // We only want to commit if the share digest of the commit is the // same as that of the prepare. @@ -174,111 +185,13 @@ impl Db { }) } - pub fn initialize( - &self, - conn: &mut SqliteConnection, - rack_uuid: &Uuid, - share_distribution: SerializableShareDistribution, - ) -> Result<(), Error> { - conn.immediate_transaction(|tx| { - if self.is_initialized(tx)? { - // If the rack is initialized, a rack uuid must exist - let uuid = self.get_rack_uuid(tx)?.unwrap(); - return Err(Error::AlreadyInitialized(uuid)); - } - match self.initialize_rack_uuid(tx, rack_uuid)? { - Some(old_uuid) => { - info!(self.log, "Re-Initializing Rack: Old UUID: {old_uuid}, New UUID: {rack_uuid}"); - self.update_prepare_for_epoch_0(tx, share_distribution) - - }, - None => { - info!(self.log, "Initializing Rack for the first time with UUID: {rack_uuid}"); - self.insert_prepare(tx, 0, share_distribution) - } - } - - }) - } - - // Insert an uncommitted key share for a given epoch. This should be called - // inside a transaction by the caller so that the given checks can be - // performed. - fn insert_prepare( - &self, - tx: &mut SqliteConnection, - epoch: i32, - share_distribution: SerializableShareDistribution, - ) -> Result<(), Error> { - use schema::key_shares::dsl; - let prepare = KeyShare::new(epoch, share_distribution)?; - diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; - Ok(()) - } - - // During rack initialization we set the rack UUID. - // - // We only allow rewriting this UUID when the rack is not initialized, - // and so we only call this from the `initialize` method. - // - // Since we only allow a single row, we just delete the existing rows - // and insert the new one. 
- // - // Return the old rack UUID if one exists - // - // TODO: Add a DB trigger to ensure only 1 row can exist at a time? - fn initialize_rack_uuid( - &self, - tx: &mut SqliteConnection, - new_uuid: &Uuid, - ) -> Result, Error> { - use schema::rack::dsl; - let old_uuid = self.get_rack_uuid(tx)?; - diesel::delete(dsl::rack).execute(tx)?; - diesel::insert_into(dsl::rack) - .values(dsl::uuid.eq(new_uuid.to_string())) - .execute(tx)?; - Ok(old_uuid) - } - - // Overwrite a share for epoch 0 during rack initialization - // - // This method should only be called from initialize. - fn update_prepare_for_epoch_0( - &self, - tx: &mut SqliteConnection, - share_distribution: SerializableShareDistribution, - ) -> Result<(), Error> { - use schema::key_shares::dsl; - let prepare = KeyShare::new(0, share_distribution)?; - diesel::update(dsl::key_shares).set(prepare).execute(tx)?; - Ok(()) - } - - pub fn get_latest_committed_share( - &self, - conn: &mut SqliteConnection, - ) -> Result<(i32, Share), Error> { - use schema::key_shares::dsl; - let res = dsl::key_shares - .select((dsl::epoch, dsl::share)) - .order(dsl::epoch.desc()) - .filter(dsl::committed.eq(true)) - .get_result::<(i32, Share)>(conn)?; - Ok(res) - } - - pub fn get_committed_share( - &self, - conn: &mut SqliteConnection, - epoch: i32, - ) -> Result { + pub fn get_committed_share(&mut self, epoch: i32) -> Result { use schema::key_shares::dsl; let share = dsl::key_shares .select(dsl::share) .filter(dsl::epoch.eq(epoch)) .filter(dsl::committed.eq(true)) - .get_result::(conn) + .get_result::(&mut self.conn) .optional()?; match share { @@ -286,51 +199,102 @@ impl Db { None => Err(Error::KeyShareNotCommitted { epoch }), } } +} - /// Return true if there is a commit for epoch 0, false otherwise - fn is_initialized(&self, tx: &mut SqliteConnection) -> Result { - use schema::key_shares::dsl; - Ok(dsl::key_shares - .select(dsl::epoch) - .filter(dsl::epoch.eq(0)) - .filter(dsl::committed.eq(true)) - .get_result::(tx) - .optional()? - .is_some()) - } +// +// ---------- DB HELPER METHODS +// + +// Insert an uncommitted key share for a given epoch. This should be called +// inside a transaction by the caller so that the given checks can be +// performed. +fn insert_prepare( + tx: &mut SqliteConnection, + epoch: i32, + share_distribution: SerializableShareDistribution, +) -> Result<(), Error> { + use schema::key_shares::dsl; + let prepare = KeyShare::new(epoch, share_distribution)?; + diesel::insert_into(dsl::key_shares).values(&prepare).execute(tx)?; + Ok(()) +} - /// Return `Ok(())` if the persisted rack uuid matches `uuid`, or - /// `Err(Error::RackUuidMismatch{..})` otherwise. - fn validate_rack_uuid( - &self, - tx: &mut SqliteConnection, - rack_uuid: &Uuid, - ) -> Result<(), Error> { - let stored_rack_uuid = self.get_rack_uuid(tx)?; - if Some(rack_uuid) != stored_rack_uuid.as_ref() { - return Err(Error::RackUuidMismatch { - expected: *rack_uuid, - actual: stored_rack_uuid, - }); - } - Ok(()) - } +// During rack initialization we set the rack UUID. +// +// We only allow rewriting this UUID when the rack is not initialized, +// and so we only call this from the `initialize` method. +// +// Since we only allow a single row, we just delete the existing rows +// and insert the new one. +// +// Return the old rack UUID if one exists +// +// TODO: Add a DB trigger to ensure only 1 row can exist at a time? 
+fn initialize_rack_uuid( + tx: &mut SqliteConnection, + new_uuid: &Uuid, +) -> Result, Error> { + use schema::rack::dsl; + let old_uuid = get_rack_uuid(tx)?; + diesel::delete(dsl::rack).execute(tx)?; + diesel::insert_into(dsl::rack) + .values(dsl::uuid.eq(new_uuid.to_string())) + .execute(tx)?; + Ok(old_uuid) +} + +// Overwrite a share for epoch 0 during rack initialization +// +// This method should only be called from initialize. +fn update_prepare_for_epoch_0( + tx: &mut SqliteConnection, + share_distribution: SerializableShareDistribution, +) -> Result<(), Error> { + use schema::key_shares::dsl; + let prepare = KeyShare::new(0, share_distribution)?; + diesel::update(dsl::key_shares).set(prepare).execute(tx)?; + Ok(()) +} - pub fn get_rack_uuid( - &self, - tx: &mut SqliteConnection, - ) -> Result, Error> { - use schema::rack::dsl; - - dsl::rack.select(dsl::uuid).get_result::(tx).optional()?.map_or( - Ok(None), - |uuidstr| { - Uuid::parse_str(&uuidstr).map(Some).map_err(|err| { - Error::ParseUuid { uuidstr, err: err.to_string() } - }) - }, - ) +/// Return true if there is a commit for epoch 0, false otherwise +fn is_initialized(tx: &mut SqliteConnection) -> Result { + use schema::key_shares::dsl; + Ok(dsl::key_shares + .select(dsl::epoch) + .filter(dsl::epoch.eq(0)) + .filter(dsl::committed.eq(true)) + .get_result::(tx) + .optional()? + .is_some()) +} + +/// Return `Ok(())` if the persisted rack uuid matches `uuid`, or +/// `Err(Error::RackUuidMismatch{..})` otherwise. +fn validate_rack_uuid( + tx: &mut SqliteConnection, + rack_uuid: &Uuid, +) -> Result<(), Error> { + let stored_rack_uuid = get_rack_uuid(tx)?; + if Some(rack_uuid) != stored_rack_uuid.as_ref() { + return Err(Error::RackUuidMismatch { + expected: *rack_uuid, + actual: stored_rack_uuid, + }); } + Ok(()) +} + +pub fn get_rack_uuid(tx: &mut SqliteConnection) -> Result, Error> { + use schema::rack::dsl; + + dsl::rack.select(dsl::uuid).get_result::(tx).optional()?.map_or( + Ok(None), + |uuidstr| { + Uuid::parse_str(&uuidstr).map(Some).map_err(|err| { + Error::ParseUuid { uuidstr, err: err.to_string() } + }) + }, + ) } #[cfg(test)] @@ -340,6 +304,14 @@ pub mod tests { use omicron_test_utils::dev::test_setup_log; use sha3::{Digest, Sha3_256}; + // This is solely so we can test the non-member functions with a properly + // initialized DB + impl Db { + fn get_conn(&mut self) -> &mut SqliteConnection { + &mut self.conn + } + } + // TODO: Fill in with actual member certs pub fn new_shares() -> Vec { let member_device_id_certs = vec![]; @@ -364,15 +336,16 @@ pub mod tests { fn simple_prepare_insert_and_query() { use schema::key_shares::dsl; let logctx = test_setup_log("test_db"); - let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); + let mut db = Db::init(&logctx.log, ":memory:").unwrap(); let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); - db.insert_prepare(&mut conn, epoch, expected.clone()).unwrap(); + let conn = db.get_conn(); + insert_prepare(conn, epoch, expected.clone()).unwrap(); let (share, committed) = dsl::key_shares .select((dsl::share, dsl::committed)) .filter(dsl::epoch.eq(epoch)) - .get_result::<(Share, bool)>(&mut conn) + .get_result::<(Share, bool)>(conn) .unwrap(); assert_eq!(share.0, expected); assert_eq!(committed, false); @@ -382,25 +355,24 @@ pub mod tests { #[test] fn commit_fails_without_corresponding_prepare() { let logctx = test_setup_log("test_db"); - let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); + let mut db = 
Db::init(&logctx.log, ":memory:").unwrap(); let epoch = 0; let digest = sprockets_common::Sha3_256Digest::default(); - db.commit_share(&mut conn, &Uuid::new_v4(), epoch, digest).unwrap_err(); + db.commit_share(&Uuid::new_v4(), epoch, digest).unwrap_err(); logctx.cleanup_successful(); } #[test] fn commit_fails_with_invalid_hash() { let logctx = test_setup_log("test_db"); - let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); + let mut db = Db::init(&logctx.log, ":memory:").unwrap(); let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); let rack_uuid = Uuid::new_v4(); - db.initialize(&mut conn, &rack_uuid, expected.clone()).unwrap(); + db.initialize(&rack_uuid, expected.clone()).unwrap(); let digest = sprockets_common::Sha3_256Digest::default(); - let err = - db.commit_share(&mut conn, &rack_uuid, epoch, digest).unwrap_err(); + let err = db.commit_share(&rack_uuid, epoch, digest).unwrap_err(); assert!(matches!(err, Error::CommitHashMismatch { epoch: _ })); logctx.cleanup_successful(); } @@ -408,25 +380,25 @@ pub mod tests { #[test] fn commit_succeeds_with_correct_hash() { let logctx = test_setup_log("test_db"); - let (db, mut conn) = Db::init(&logctx.log, ":memory:").unwrap(); + let mut db = Db::init(&logctx.log, ":memory:").unwrap(); let shares = new_shares(); let epoch = 0; let expected: SerializableShareDistribution = shares[0].clone().into(); let rack_uuid = Uuid::new_v4(); - db.initialize(&mut conn, &rack_uuid, expected.clone()).unwrap(); + db.initialize(&rack_uuid, expected.clone()).unwrap(); let val = bcs::to_bytes(&expected).unwrap(); let digest = sprockets_common::Sha3_256Digest(Sha3_256::digest(&val).into()) .into(); - assert!(db.commit_share(&mut conn, &rack_uuid, epoch, digest).is_ok()); + assert!(db.commit_share(&rack_uuid, epoch, digest).is_ok()); // Ensure `committed = true` use schema::key_shares::dsl; let committed = dsl::key_shares .select(dsl::committed) .filter(dsl::epoch.eq(epoch)) - .get_result::(&mut conn) + .get_result::(db.get_conn()) .unwrap(); assert_eq!(true, committed); logctx.cleanup_successful(); diff --git a/bootstore/src/node.rs b/bootstore/src/node.rs index e4abf9756a5..f0c7edbdfcf 100644 --- a/bootstore/src/node.rs +++ b/bootstore/src/node.rs @@ -7,7 +7,6 @@ //! Most logic is contained here, but networking sits on top. //! This allows easier testing of clusters and failure situations. -use diesel::SqliteConnection; use slog::Logger; use sprockets_common::Sha3_256Digest; //use sprockets_host::Ed25519Certificate; @@ -42,17 +41,13 @@ pub struct Config { pub struct Node { config: Config, db: Db, - - // For now we just use a single connection - // This *may* change once there is an async connection handling mechanism on top. 
- conn: SqliteConnection, } impl Node { /// Create a new Node pub fn new(config: Config) -> Node { - let (db, conn) = Db::init(&config.log, &config.db_path).unwrap(); - Node { config, db, conn } + let db = Db::init(&config.log, &config.db_path).unwrap(); + Node { config, db } } /// Handle a message received over sprockets from another [`Node`] or @@ -99,7 +94,7 @@ impl Node { &mut self, epoch: i32, ) -> Result { - let share = self.db.get_committed_share(&mut self.conn, epoch)?; + let share = self.db.get_committed_share(epoch)?; Ok(NodeOpResult::Share { epoch, share: share.0.share }) } @@ -109,7 +104,7 @@ impl Node { rack_uuid: &Uuid, share_distribution: SerializableShareDistribution, ) -> Result { - self.db.initialize(&mut self.conn, rack_uuid, share_distribution)?; + self.db.initialize(rack_uuid, share_distribution)?; Ok(NodeOpResult::CoordinatorAck) } @@ -124,12 +119,7 @@ impl Node { return Err(NodeError::KeySharePrepareForEpoch0); } - self.db.prepare_share( - &mut self.conn, - rack_uuid, - epoch, - share_distribution, - )?; + self.db.prepare_share(rack_uuid, epoch, share_distribution)?; Ok(NodeOpResult::CoordinatorAck) } @@ -142,7 +132,6 @@ impl Node { prepare_shared_distribution_digest: Sha3_256Digest, ) -> Result { self.db.commit_share( - &mut self.conn, rack_uuid, epoch, prepare_shared_distribution_digest, @@ -192,12 +181,7 @@ mod tests { let epoch = 0; let prepare = KeyShare::new(epoch, sd.clone()).unwrap(); node.db - .commit_share( - &mut node.conn, - &rack_uuid, - epoch, - prepare.share_digest.into(), - ) + .commit_share(&rack_uuid, epoch, prepare.share_digest.into()) .unwrap(); let expected = From a55e683147a5ecb2052c61b47ea0e614b4c95bdc Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Fri, 9 Sep 2022 21:59:23 +0000 Subject: [PATCH 14/16] Add trigger to ensure only one rack row is present --- Cargo.lock | 7 +++++++ bootstore/Cargo.toml | 1 + bootstore/src/db/mod.rs | 22 ++++++++++++++++++++++ bootstore/src/db/schema.sql | 10 ++++++++++ 4 files changed, 40 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 03dc93ab69f..f5f882550db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -83,6 +83,12 @@ dependencies = [ "term", ] +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + [[package]] name = "async-bb8-diesel" version = "0.1.0" @@ -352,6 +358,7 @@ dependencies = [ name = "bootstore" version = "0.1.0" dependencies = [ + "assert_matches", "bcs", "bincode", "derive_more", diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index 6ef8366391c..e19d2df5250 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -30,5 +30,6 @@ uuid = { version = "1.1.0", features = [ "serde", "v4" ] } vsss-rs = { version = "2.0.0", default-features = false, features = ["std"] } [dev-dependencies] +assert_matches = "1.5.0" bincode = "1.3.3" omicron-test-utils = { path = "../test-utils" } diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 68b85902096..27c9d196bc0 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -301,6 +301,7 @@ pub fn get_rack_uuid(tx: &mut SqliteConnection) -> Result, Error> { pub mod tests { use super::*; use crate::trust_quorum::{RackSecret, ShareDistribution}; + use assert_matches::assert_matches; use omicron_test_utils::dev::test_setup_log; use sha3::{Digest, Sha3_256}; @@ -403,4 +404,25 @@ pub mod tests { assert_eq!(true, committed); logctx.cleanup_successful(); } + + #[test] + 
fn ensure_db_trigger_fires_for_more_than_one_row_in_rack_table() { + let logctx = test_setup_log("test_db"); + let mut db = Db::init(&logctx.log, ":memory:").unwrap(); + use schema::rack::dsl; + // One insert succeeds. + diesel::insert_into(dsl::rack) + .values(dsl::uuid.eq(Uuid::new_v4().to_string())) + .execute(db.get_conn()) + .unwrap(); + + // A second fails + let err = diesel::insert_into(dsl::rack) + .values(dsl::uuid.eq(Uuid::new_v4().to_string())) + .execute(db.get_conn()) + .unwrap_err(); + assert_matches!(err, diesel::result::Error::DatabaseError(_, s) => { + assert_eq!("maximum one rack", s.message()); + }); + } } diff --git a/bootstore/src/db/schema.sql b/bootstore/src/db/schema.sql index e755919aece..bf74d917e82 100644 --- a/bootstore/src/db/schema.sql +++ b/bootstore/src/db/schema.sql @@ -22,3 +22,13 @@ CREATE TABLE IF NOT EXISTS encrypted_root_secrets ( PRIMARY KEY (epoch) FOREIGN KEY (epoch) REFERENCES key_share_prepares (epoch) ); + +-- Ensure there is no more than one rack row +-- We rollback the whole transaction in the case of a constraint +-- violation, since we have broken an invariant and should not proceed. +CREATE TRIGGER ensure_rack_contains_at_most_one_row +BEFORE INSERT ON rack +WHEN (SELECT COUNT(*) FROM rack) >= 1 +BEGIN + SELECT RAISE(ROLLBACK, 'maximum one rack'); +END; From 3336ea44a9a13de7d9c7be099ea9659d66eeed3d Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Fri, 9 Sep 2022 22:00:20 +0000 Subject: [PATCH 15/16] remove implemented TODO comment --- bootstore/src/db/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 27c9d196bc0..07ada0af4c6 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -228,8 +228,6 @@ fn insert_prepare( // and insert the new one. // // Return the old rack UUID if one exists -// -// TODO: Add a DB trigger to ensure only 1 row can exist at a time? fn initialize_rack_uuid( tx: &mut SqliteConnection, new_uuid: &Uuid, From 39a7ca6a5cb19b4e617bf59fdc9756bc9596136f Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Fri, 9 Sep 2022 22:46:04 +0000 Subject: [PATCH 16/16] cleanup after myself --- bootstore/src/db/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/bootstore/src/db/mod.rs b/bootstore/src/db/mod.rs index 07ada0af4c6..3dcd8ca5d1f 100644 --- a/bootstore/src/db/mod.rs +++ b/bootstore/src/db/mod.rs @@ -422,5 +422,6 @@ pub mod tests { assert_matches!(err, diesel::result::Error::DatabaseError(_, s) => { assert_eq!("maximum one rack", s.message()); }); + logctx.cleanup_successful(); } }
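
For readers following the series, here is a minimal end-to-end sketch of the Db API as it stands after the final patch (a single owned connection, digest-checked commits). It is written in the style of the unit tests above, so it borrows the test-only helpers `test_setup_log` and `new_shares` shown in the diffs; the function name `example_flow` is illustrative only and is not part of any patch.

use uuid::Uuid;

use crate::db::models::KeyShare;
use crate::db::tests::new_shares;
use crate::db::Db;
use crate::trust_quorum::SerializableShareDistribution;
use omicron_test_utils::dev::test_setup_log;

// Sketch only: mirrors the `initialize_and_reconfigure` test, not an extra patch.
fn example_flow() -> Result<(), crate::db::Error> {
    let logctx = test_setup_log("db_flow_sketch");
    let mut db = Db::init(&logctx.log, ":memory:")?;
    let rack_uuid = Uuid::new_v4();

    // Rack initialization: the Initialize request doubles as the epoch-0
    // prepare, which is then committed against its share-distribution digest.
    let sd: SerializableShareDistribution = new_shares()[0].clone().into();
    let digest = KeyShare::share_distribution_digest(&sd)?.into();
    db.initialize(&rack_uuid, sd)?;
    db.commit_share(&rack_uuid, 0, digest)?;

    // A reconfiguration prepares and commits a new share at a later epoch;
    // the rack UUID must match and the rack must already be initialized.
    let sd: SerializableShareDistribution = new_shares()[0].clone().into();
    let digest = KeyShare::share_distribution_digest(&sd)?.into();
    db.prepare_share(&rack_uuid, 1, sd)?;
    db.commit_share(&rack_uuid, 1, digest)?;

    // Only committed shares can be read back out.
    let _latest = db.get_committed_share(1)?;

    logctx.cleanup_successful();
    Ok(())
}

A commit whose digest does not match the stored prepare is rejected with Error::CommitHashMismatch rather than applied, and the schema.sql trigger added in PATCH 14 ensures the rack table never holds more than one row, so the rack UUID checks above always compare against a single stored value.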