From 473b1caefb1737281d4d2441ef9502bbca24ad2f Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Wed, 7 Dec 2022 20:28:17 +0100 Subject: [PATCH 1/8] keymanager/src/policy: Fix copy-pasted comment and logging --- go/runtime/host/multi/multi.go | 2 +- keymanager/src/policy/cached.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go/runtime/host/multi/multi.go b/go/runtime/host/multi/multi.go index 78adf4585ca..945f7965668 100644 --- a/go/runtime/host/multi/multi.go +++ b/go/runtime/host/multi/multi.go @@ -192,7 +192,7 @@ func (agg *Aggregate) SetVersion(ctx context.Context, version version.Version) e return nil } - // Otherwise tear it dow. + // Otherwise tear it down. agg.stopActiveLocked() } diff --git a/keymanager/src/policy/cached.rs b/keymanager/src/policy/cached.rs index 4354ebed29a..3beb16ec28e 100644 --- a/keymanager/src/policy/cached.rs +++ b/keymanager/src/policy/cached.rs @@ -199,10 +199,10 @@ impl Policy { fn save_raw_policy(untrusted_local: &dyn KeyValue, raw_policy: &[u8]) { let ciphertext = seal(Keypolicy::MRENCLAVE, POLICY_SEAL_CONTEXT, raw_policy); - // Persist the encrypted master secret. + // Persist the encrypted policy. untrusted_local .insert(POLICY_STORAGE_KEY.to_vec(), ciphertext) - .expect("failed to persist master secret"); + .expect("failed to persist policy"); } } From 961acd50d14583e70fb85caf6437b684b8fbced7 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Wed, 7 Dec 2022 21:50:13 +0100 Subject: [PATCH 2/8] runtime/src/rak: Fetch quote policy only once when doing attestations Every deployment has its own quote policy which cannot change while the runtime is running. Fetching the policy on every attestation is time consuming and can be done only once, even before the protocol is fully initialized. --- runtime/src/attestation.rs | 51 ++++++++++++++++++++++---------------- runtime/src/rak.rs | 37 ++++++++++++++++++++++++--- 2 files changed, 62 insertions(+), 26 deletions(-) diff --git a/runtime/src/attestation.rs b/runtime/src/attestation.rs index fbd8c14db3b..ceed97e345a 100644 --- a/runtime/src/attestation.rs +++ b/runtime/src/attestation.rs @@ -104,36 +104,43 @@ impl Handler { } fn set_quote(&self, ctx: Context, quote: Quote) -> Result { + if self.rak.quote_policy().is_none() { + info!(self.logger, "Configuring quote policy"); + + // Obtain current quote policy from (verified) consensus state. + let ctx = ctx.freeze(); + let consensus_state = self.consensus_verifier.latest_state()?; + let registry_state = RegistryState::new(&consensus_state); + let runtime = registry_state + .runtime(Context::create_child(&ctx), &self.runtime_id)? + .ok_or(anyhow!("missing runtime descriptor"))?; + let ad = runtime + .deployment_for_version(self.version) + .ok_or(anyhow!("no corresponding runtime deployment"))?; + + let policy = match runtime.tee_hardware { + TEEHardware::TEEHardwareIntelSGX => { + let sc: SGXConstraints = ad + .try_decode_tee() + .map_err(|_| anyhow!("bad TEE constraints"))?; + sc.policy() + } + _ => bail!("configured runtime hardware mismatch"), + }; + + self.rak.set_quote_policy(policy)?; + } + info!( self.logger, "Configuring quote for the runtime attestation key binding" ); - // Obtain current quote policy from (verified) consensus state. - let ctx = ctx.freeze(); - let consensus_state = self.consensus_verifier.latest_state()?; - let registry_state = RegistryState::new(&consensus_state); - let runtime = registry_state - .runtime(Context::create_child(&ctx), &self.runtime_id)? 
- .ok_or(anyhow!("missing runtime descriptor"))?; - let ad = runtime - .deployment_for_version(self.version) - .ok_or(anyhow!("no corresponding runtime deployment"))?; - - let policy = match runtime.tee_hardware { - TEEHardware::TEEHardwareIntelSGX => { - let sc: SGXConstraints = ad - .try_decode_tee() - .map_err(|_| anyhow!("bad TEE constraints"))?; - sc.policy() - } - _ => bail!("configured runtime hardware mismatch"), - }; - // Configure the quote and policy on the RAK. - let verified_quote = self.rak.set_quote(quote, policy)?; + let verified_quote = self.rak.set_quote(quote)?; // Sign the report data, latest verified consensus height and host node ID. + let consensus_state = self.consensus_verifier.latest_state()?; let height = consensus_state.height(); let node_id = self.host.identity()?; let h = SGXAttestation::hash(&verified_quote.report_data, node_id, height); diff --git a/runtime/src/rak.rs b/runtime/src/rak.rs index f598f00e18a..44d7a1ff70b 100644 --- a/runtime/src/rak.rs +++ b/runtime/src/rak.rs @@ -54,13 +54,17 @@ enum QuoteError { MrSignerMismatch, #[error("quote nonce mismatch")] NonceMismatch, + #[error("quote policy not set")] + QuotePolicyNotSet, + #[error("quote policy already set")] + QuotePolicyAlreadySet, } struct Inner { private_key: Option, quote: Option>, quote_timestamp: Option, - quote_policy: Option, + quote_policy: Option>, known_quotes: VecDeque>, #[allow(unused)] enclave_identity: Option, @@ -188,7 +192,7 @@ impl RAK { /// Configure the remote attestation quote for RAK. #[cfg(target_env = "sgx")] - pub(crate) fn set_quote(&self, quote: Quote, policy: QuotePolicy) -> Result { + pub(crate) fn set_quote(&self, quote: Quote) -> Result { let rak_pub = self.public_key().expect("RAK must be configured"); let mut inner = self.inner.write().unwrap(); @@ -206,7 +210,11 @@ impl RAK { // that failed. inner.nonce = None; - let verified_quote = quote.verify(&policy)?; + let policy = inner + .quote_policy + .as_ref() + .ok_or(QuoteError::QuotePolicyNotSet)?; + let verified_quote = quote.verify(policy)?; let nonce = &verified_quote.report_data[32..]; if expected_nonce.as_ref() != nonce { return Err(QuoteError::NonceMismatch.into()); @@ -239,7 +247,6 @@ impl RAK { let quote = Arc::new(quote); inner.quote = Some(quote.clone()); inner.quote_timestamp = Some(verified_quote.timestamp); - inner.quote_policy = Some(policy); // Keep around last two valid quotes to allow for transition as node registration does not // happen immediately after a quote has been verified by the runtime. @@ -251,6 +258,18 @@ impl RAK { Ok(verified_quote) } + /// Configure the runtime quote policy. + #[cfg(target_env = "sgx")] + pub(crate) fn set_quote_policy(&self, policy: QuotePolicy) -> Result<()> { + let mut inner = self.inner.write().unwrap(); + if inner.quote_policy.is_some() { + return Err(QuoteError::QuotePolicyAlreadySet.into()); + } + inner.quote_policy = Some(Arc::new(policy)); + + Ok(()) + } + /// Public part of RAK. /// /// This method may return `None` in the case where the enclave is not @@ -287,6 +306,16 @@ impl RAK { inner.quote.clone() } + /// Runtime quote policy. + /// + /// This method may return `None` in the case where the enclave is not + /// running on SGX hardware or if the quote policy has not yet been + /// fetched from the consensus layer. + pub fn quote_policy(&self) -> Option> { + let inner = self.inner.read().unwrap(); + inner.quote_policy.clone() + } + /// Verify a provided RAK binding. 
pub fn verify_binding(quote: &VerifiedQuote, rak: &PublicKey) -> Result<()> { if quote.report_data.len() < 32 { From 55106f87389856dc5165e9bcacfc32fa12f0ce62 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Fri, 9 Dec 2022 09:20:49 +0100 Subject: [PATCH 3/8] runtime/src/policy: Move SGX and quote policy verification to one place --- keymanager/src/api/errors.rs | 2 - keymanager/src/policy/cached.rs | 51 +++++------ runtime/src/attestation.rs | 37 +++----- runtime/src/lib.rs | 1 + runtime/src/policy.rs | 152 ++++++++++++++++++++++++++++++++ 5 files changed, 188 insertions(+), 55 deletions(-) create mode 100644 runtime/src/policy.rs diff --git a/keymanager/src/api/errors.rs b/keymanager/src/api/errors.rs index b60eb31e0c7..32f61b7c708 100644 --- a/keymanager/src/api/errors.rs +++ b/keymanager/src/api/errors.rs @@ -27,8 +27,6 @@ pub enum KeyManagerError { PolicyInvalid(#[from] anyhow::Error), #[error("policy has insufficient signatures")] PolicyInsufficientSignatures, - #[error("policy hasn't been published")] - PolicyNotPublished, #[error(transparent)] Other(anyhow::Error), } diff --git a/keymanager/src/policy/cached.rs b/keymanager/src/policy/cached.rs index 3beb16ec28e..04f6bfdc6da 100644 --- a/keymanager/src/policy/cached.rs +++ b/keymanager/src/policy/cached.rs @@ -6,7 +6,6 @@ use std::{ }; use anyhow::Result; -use io_context::Context; use lazy_static::lazy_static; use sgx_isa::Keypolicy; use tiny_keccak::{Hasher, Sha3}; @@ -19,10 +18,9 @@ use oasis_core_runtime::{ EnclaveIdentity, }, }, - consensus::{ - keymanager::SignedPolicySGX, state::keymanager::ImmutableState as KeyManagerState, - }, + consensus::keymanager::SignedPolicySGX, enclave_rpc::Context as RpcContext, + policy::PolicyVerifier, runtime_context, storage::KeyValue, }; @@ -64,33 +62,27 @@ impl Policy { } /// Initialize (or update) the policy state. - pub fn init(&self, ctx: &mut RpcContext, raw_policy: &[u8]) -> Result> { + pub fn init(&self, ctx: &mut RpcContext, policy_raw: &[u8]) -> Result> { // If this is an insecure build, don't bother trying to apply any policy. if Self::unsafe_skip() { return Ok(vec![]); } - // De-serialize the new policy, verify signatures. - let new_policy = CachedPolicy::parse(raw_policy)?; - // Ensure the new policy's runtime ID matches the current enclave's. let rctx = runtime_context!(ctx, KmContext); - if rctx.runtime_id != new_policy.runtime_id { - return Err(KeyManagerError::PolicyInvalidRuntime.into()); - } - - // Ensure the new policy was published in the consensus layer. - let state = ctx.consensus_verifier.latest_state()?; - let km_state = KeyManagerState::new(&state); - let published_policy = km_state - .status(Context::create_child(&ctx.io_ctx), new_policy.runtime_id)? - .ok_or(KeyManagerError::PolicyNotPublished)? - .policy - .ok_or(KeyManagerError::PolicyNotPublished)?; - let untrusted_policy: SignedPolicySGX = cbor::from_slice(raw_policy)?; - if untrusted_policy != published_policy { - return Err(KeyManagerError::PolicyNotPublished.into()); - } + let key_manager = rctx.runtime_id; + + // De-serialize the new policy, verify runtime ID and signatures. + let ioctx = ctx.io_ctx.clone(); + let consensus_verifier = ctx.consensus_verifier.clone(); + let untrusted_policy = cbor::from_slice(policy_raw)?; + let published_policy = PolicyVerifier::new(consensus_verifier).verify_key_manager_policy( + ioctx, + untrusted_policy, + key_manager, + false, + )?; + let new_policy = CachedPolicy::parse(published_policy, policy_raw)?; // Lock as late as possible. 
let mut inner = self.inner.write().unwrap(); @@ -115,7 +107,7 @@ impl Policy { } Ordering::Less => { // Persist then apply the new policy. - Self::save_raw_policy(ctx.untrusted_local_storage, raw_policy); + Self::save_raw_policy(ctx.untrusted_local_storage, policy_raw); let new_checksum = new_policy.checksum.clone(); inner.policy = Some(new_policy); @@ -192,7 +184,7 @@ impl Policy { unseal(Keypolicy::MRENCLAVE, POLICY_SEAL_CONTEXT, &ciphertext).map(|plaintext| { // Deserialization failures are fatal, because it is state corruption. - CachedPolicy::parse(&plaintext).expect("failed to deserialize persisted policy") + CachedPolicy::parse_raw(&plaintext).expect("failed to deserialize persisted policy") }) } @@ -217,9 +209,12 @@ struct CachedPolicy { } impl CachedPolicy { - fn parse(raw: &[u8]) -> Result { - // Parse out the signed policy. + fn parse_raw(raw: &[u8]) -> Result { let untrusted_policy: SignedPolicySGX = cbor::from_slice(raw)?; + Self::parse(untrusted_policy, raw) + } + + fn parse(untrusted_policy: SignedPolicySGX, raw: &[u8]) -> Result { let policy = verify_policy_and_trusted_signers(&untrusted_policy)?; let mut cached_policy = Self::default(); diff --git a/runtime/src/attestation.rs b/runtime/src/attestation.rs index ceed97e345a..143547455d1 100644 --- a/runtime/src/attestation.rs +++ b/runtime/src/attestation.rs @@ -2,7 +2,7 @@ use std::sync::Arc; #[cfg(target_env = "sgx")] -use anyhow::{anyhow, bail, Result}; +use anyhow::{bail, Result}; #[cfg(target_env = "sgx")] use io_context::Context; #[cfg(target_env = "sgx")] @@ -11,12 +11,9 @@ use slog::Logger; #[cfg(target_env = "sgx")] use crate::{ - common::crypto::signature::Signer, - common::sgx::Quote, - consensus::registry::{ - SGXAttestation, SGXConstraints, TEEHardware, ATTESTATION_SIGNATURE_CONTEXT, - }, - consensus::state::registry::ImmutableState as RegistryState, + common::{crypto::signature::Signer, sgx::Quote}, + consensus::registry::{SGXAttestation, ATTESTATION_SIGNATURE_CONTEXT}, + policy::PolicyVerifier, types::Body, }; use crate::{ @@ -109,24 +106,14 @@ impl Handler { // Obtain current quote policy from (verified) consensus state. let ctx = ctx.freeze(); - let consensus_state = self.consensus_verifier.latest_state()?; - let registry_state = RegistryState::new(&consensus_state); - let runtime = registry_state - .runtime(Context::create_child(&ctx), &self.runtime_id)? - .ok_or(anyhow!("missing runtime descriptor"))?; - let ad = runtime - .deployment_for_version(self.version) - .ok_or(anyhow!("no corresponding runtime deployment"))?; - - let policy = match runtime.tee_hardware { - TEEHardware::TEEHardwareIntelSGX => { - let sc: SGXConstraints = ad - .try_decode_tee() - .map_err(|_| anyhow!("bad TEE constraints"))?; - sc.policy() - } - _ => bail!("configured runtime hardware mismatch"), - }; + let consensus_verifier = self.consensus_verifier.clone(); + let version = Some(self.version); + let policy = PolicyVerifier::new(consensus_verifier).quote_policy( + ctx, + &self.runtime_id, + version, + true, + )?; self.rak.set_quote_policy(policy)?; } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 7f14ec36383..f2ff039aa19 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -31,6 +31,7 @@ pub mod enclave_rpc; pub mod host; pub mod init; pub mod macros; +pub mod policy; pub mod protocol; pub mod rak; pub mod storage; diff --git a/runtime/src/policy.rs b/runtime/src/policy.rs new file mode 100644 index 00000000000..7916a3247cd --- /dev/null +++ b/runtime/src/policy.rs @@ -0,0 +1,152 @@ +//! 
Consensus SGX and quote policy handling. + +use std::sync::Arc; + +use anyhow::{bail, Result}; +use io_context::Context; +use slog::{debug, Logger}; +use thiserror::Error; + +use crate::{ + common::{logger::get_logger, namespace::Namespace, sgx::QuotePolicy, version::Version}, + consensus::{ + keymanager::SignedPolicySGX, + registry::{SGXConstraints, TEEHardware}, + state::{ + beacon::ImmutableState as BeaconState, keymanager::ImmutableState as KeyManagerState, + registry::ImmutableState as RegistryState, + }, + verifier::Verifier, + HEIGHT_LATEST, + }, +}; + +/// Policy verifier error. +#[derive(Error, Debug)] +pub enum PolicyVerifierError { + #[error("missing runtime descriptor")] + MissingRuntimeDescriptor, + #[error("no corresponding runtime deployment")] + NoDeployment, + #[error("bad TEE constraints")] + BadTEEConstraints, + #[error("policy hasn't been published")] + PolicyNotPublished, + #[error("configured runtime hardware mismatch")] + HardwareMismatch, +} + +/// Consensus policy verifier. +pub struct PolicyVerifier { + consensus_verifier: Arc, + logger: Logger, +} + +impl PolicyVerifier { + /// Create a new consensus policy verifier. + pub fn new(consensus_verifier: Arc) -> Self { + let logger = get_logger("runtime/policy_verifier"); + Self { + consensus_verifier, + logger, + } + } + + /// Fetch runtime's quote policy from the latest verified consensus layer state. + /// + /// If the runtime version is not provided, the policy for the active deployment is returned. + pub fn quote_policy( + &self, + ctx: Arc, + runtime_id: &Namespace, + version: Option, + use_latest_state: bool, + ) -> Result { + // Verify to the latest height, if needed. + let consensus_state = if use_latest_state { + self.consensus_verifier.latest_state()? + } else { + self.consensus_verifier.state_at(HEIGHT_LATEST)? + }; + + // Fetch quote policy from the consensus layer using the given or the active version. + let registry_state = RegistryState::new(&consensus_state); + let runtime = registry_state + .runtime(Context::create_child(&ctx), runtime_id)? + .ok_or(PolicyVerifierError::MissingRuntimeDescriptor)?; + + let ad = match version { + Some(version) => runtime + .deployment_for_version(version) + .ok_or(PolicyVerifierError::NoDeployment)?, + None => { + let beacon_state = BeaconState::new(&consensus_state); + let epoch = beacon_state.epoch(Context::create_child(&ctx))?; + + runtime + .active_deployment(epoch) + .ok_or(PolicyVerifierError::NoDeployment)? + } + }; + + let policy = match runtime.tee_hardware { + TEEHardware::TEEHardwareIntelSGX => { + let sc: SGXConstraints = ad + .try_decode_tee() + .map_err(|_| PolicyVerifierError::BadTEEConstraints)?; + sc.policy() + } + _ => bail!(PolicyVerifierError::HardwareMismatch), + }; + + Ok(policy) + } + + /// Fetch key manager's policy from the latest verified consensus layer state. + pub fn key_manager_policy( + &self, + ctx: Arc, + key_manager: Namespace, + use_latest_state: bool, + ) -> Result { + // Verify to the latest height, if needed. + let consensus_state = if use_latest_state { + self.consensus_verifier.latest_state()? + } else { + self.consensus_verifier.state_at(HEIGHT_LATEST)? + }; + + // Fetch policy from the consensus layer. + let km_state = KeyManagerState::new(&consensus_state); + let policy = km_state + .status(Context::create_child(&ctx), key_manager)? + .ok_or(PolicyVerifierError::PolicyNotPublished)? 
+ .policy + .ok_or(PolicyVerifierError::PolicyNotPublished)?; + + Ok(policy) + } + + /// Verify that key manager's policy has been published in the consensus layer. + pub fn verify_key_manager_policy( + &self, + ctx: Arc, + policy: SignedPolicySGX, + key_manager: Namespace, + use_latest_state: bool, + ) -> Result { + let published_policy = self.key_manager_policy(ctx, key_manager, use_latest_state)?; + + if policy != published_policy { + debug!( + self.logger, + "key manager policy mismatch"; + "untrusted" => ?policy, + "published" => ?published_policy, + ); + return Err(PolicyVerifierError::PolicyNotPublished.into()); + } + + Ok(published_policy) + } +} From 97bc6d640a4d739f731ce4a1a8569492f56b591b Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Fri, 9 Dec 2022 09:39:55 +0100 Subject: [PATCH 4/8] keymanager/src/client/remote: Verify key manager policy Verify that the policy belongs to the key manager and that it has been published in the consensus layer. --- keymanager/src/client/remote.rs | 4 +-- runtime/src/dispatcher.rs | 36 +++++++++++++++++----- runtime/src/enclave_rpc/dispatcher.rs | 8 +++-- runtime/src/policy.rs | 26 ++++++++++++++++ tests/runtimes/simple-keyvalue/src/main.rs | 4 +-- 5 files changed, 63 insertions(+), 15 deletions(-) diff --git a/keymanager/src/client/remote.rs b/keymanager/src/client/remote.rs index 9e3ab15ca20..e727f948d9a 100644 --- a/keymanager/src/client/remote.rs +++ b/keymanager/src/client/remote.rs @@ -142,9 +142,7 @@ impl RemoteClient { } /// Set client allowed enclaves from key manager policy. - pub fn set_policy(&self, signed_policy_raw: Vec) -> Result<(), KeyManagerError> { - let untrusted_policy: SignedPolicySGX = cbor::from_slice(&signed_policy_raw) - .map_err(|err| KeyManagerError::PolicyInvalid(err.into()))?; + pub fn set_policy(&self, untrusted_policy: SignedPolicySGX) -> Result<(), KeyManagerError> { let policy = verify_policy_and_trusted_signers(&untrusted_policy)?; let policies: HashSet = diff --git a/runtime/src/dispatcher.rs b/runtime/src/dispatcher.rs index e5f165503f1..214acf9d18d 100644 --- a/runtime/src/dispatcher.rs +++ b/runtime/src/dispatcher.rs @@ -9,7 +9,7 @@ use std::{ thread, }; -use anyhow::Result as AnyResult; +use anyhow::{anyhow, Result as AnyResult}; use io_context::Context; use slog::{debug, error, info, warn, Logger}; use tokio::sync::mpsc; @@ -35,6 +35,7 @@ use crate::{ types::{Message as RpcMessage, Request as RpcRequest}, Context as RpcContext, }, + policy::PolicyVerifier, protocol::{Protocol, ProtocolUntrustedLocalStorage}, rak::RAK, storage::mkvs::{sync::NoopReadSyncer, OverlayTree, Root, RootType}, @@ -140,6 +141,7 @@ struct State { txn_dispatcher: Arc, #[cfg_attr(not(target_env = "sgx"), allow(unused))] attestation_handler: attestation::Handler, + policy_verifier: Arc, cache_set: cache::CacheSet, } @@ -273,10 +275,11 @@ impl Dispatcher { attestation_handler: attestation::Handler::new( self.rak.clone(), protocol.clone(), - consensus_verifier, + consensus_verifier.clone(), protocol.get_runtime_id(), protocol.get_config().version, ), + policy_verifier: Arc::new(PolicyVerifier::new(consensus_verifier)), cache_set: cache::CacheSet::new(protocol.clone()), }; @@ -447,8 +450,8 @@ impl Dispatcher { // Other requests. Body::RuntimeKeyManagerPolicyUpdateRequest { signed_policy_raw } => { - // KeyManager policy update local RPC call. - self.handle_km_policy_update(&state.rpc_dispatcher, ctx, signed_policy_raw) + // Key manager policy update local RPC call. 
+ self.handle_km_policy_update(ctx, state, signed_policy_raw) } Body::RuntimeConsensusSyncRequest { height } => state .consensus_verifier @@ -973,8 +976,8 @@ impl Dispatcher { fn handle_km_policy_update( &self, - rpc_dispatcher: &RpcDispatcher, - _ctx: Context, + ctx: Context, + state: State, signed_policy_raw: Vec, ) -> Result { // Make sure to abort the process on panic during policy processing as that indicates a @@ -982,7 +985,26 @@ impl Dispatcher { let _guard = AbortOnPanic; debug!(self.logger, "Received km policy update request"); - rpc_dispatcher.handle_km_policy_update(signed_policy_raw); + + // Verify and decode the policy. + let ctx = ctx.freeze(); + let runtime_id = state.protocol.get_host_info().runtime_id; + let key_manager = state + .policy_verifier + .key_manager(ctx.clone(), &runtime_id, true)?; + let untrusted_policy = cbor::from_slice(&signed_policy_raw).map_err(|err| anyhow!(err))?; + let published_policy = state.policy_verifier.verify_key_manager_policy( + ctx, + untrusted_policy, + key_manager, + false, + )?; + + // Dispatch the local RPC call. + state + .rpc_dispatcher + .handle_km_policy_update(published_policy); + debug!(self.logger, "KM policy update request complete"); Ok(Body::RuntimeKeyManagerPolicyUpdateResponse {}) diff --git a/runtime/src/enclave_rpc/dispatcher.rs b/runtime/src/enclave_rpc/dispatcher.rs index 6e637bd7cfe..d289f693f43 100644 --- a/runtime/src/enclave_rpc/dispatcher.rs +++ b/runtime/src/enclave_rpc/dispatcher.rs @@ -4,6 +4,8 @@ use std::collections::HashMap; use anyhow::Result; use thiserror::Error; +use crate::consensus::keymanager::SignedPolicySGX; + use super::{ context::Context, types::{Body, Request, Response}, @@ -124,7 +126,7 @@ impl Method { } /// Key manager policy update handler callback. -pub type KeyManagerPolicyHandler = dyn Fn(Vec) + Send + Sync; +pub type KeyManagerPolicyHandler = dyn Fn(SignedPolicySGX) + Send + Sync; /// RPC call dispatcher. #[derive(Default)] @@ -209,9 +211,9 @@ impl Dispatcher { } /// Handle key manager policy update. - pub fn handle_km_policy_update(&self, signed_policy_raw: Vec) { + pub fn handle_km_policy_update(&self, policy: SignedPolicySGX) { if let Some(handler) = self.km_policy_handler.as_ref() { - handler(signed_policy_raw) + handler(policy) } } diff --git a/runtime/src/policy.rs b/runtime/src/policy.rs index 7916a3247cd..4341d423ee7 100644 --- a/runtime/src/policy.rs +++ b/runtime/src/policy.rs @@ -34,6 +34,8 @@ pub enum PolicyVerifierError { PolicyNotPublished, #[error("configured runtime hardware mismatch")] HardwareMismatch, + #[error("runtime doesn't use key manager")] + NoKeyManager, } /// Consensus policy verifier. @@ -149,4 +151,28 @@ impl PolicyVerifier { Ok(published_policy) } + + /// Fetch runtime's key manager. + pub fn key_manager( + &self, + ctx: Arc, + runtime_id: &Namespace, + use_latest_state: bool, + ) -> Result { + let consensus_state = if use_latest_state { + self.consensus_verifier.latest_state()? + } else { + self.consensus_verifier.state_at(HEIGHT_LATEST)? + }; + + let registry_state = RegistryState::new(&consensus_state); + let runtime = registry_state + .runtime(Context::create_child(&ctx), runtime_id)? 
+ .ok_or(PolicyVerifierError::MissingRuntimeDescriptor)?; + let key_manager = runtime + .key_manager + .ok_or(PolicyVerifierError::NoKeyManager)?; + + Ok(key_manager) + } } diff --git a/tests/runtimes/simple-keyvalue/src/main.rs b/tests/runtimes/simple-keyvalue/src/main.rs index bb638285a42..9fd1e9f6d7b 100644 --- a/tests/runtimes/simple-keyvalue/src/main.rs +++ b/tests/runtimes/simple-keyvalue/src/main.rs @@ -380,9 +380,9 @@ pub fn main_with_version(version: Version) { #[cfg(target_env = "sgx")] state .rpc_dispatcher - .set_keymanager_policy_update_handler(Some(Box::new(move |raw_signed_policy| { + .set_keymanager_policy_update_handler(Some(Box::new(move |policy| { key_manager - .set_policy(raw_signed_policy) + .set_policy(policy) .expect("failed to update km client policy"); }))); From 0744f28cdbd3511938f5b7b036a4f6c8353a56c2 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Thu, 8 Dec 2022 10:05:24 +0100 Subject: [PATCH 5/8] go/runtime/registry: Fix watching policy updates When multiple key managers were running, the last known status of the runtime's key manager was overwritten with each status update. On runtime (re)starts, this resulted in the wrong policy being set. --- .changelog/5092.bugfix.md | 5 +++++ go/runtime/registry/host.go | 5 +++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 .changelog/5092.bugfix.md diff --git a/.changelog/5092.bugfix.md b/.changelog/5092.bugfix.md new file mode 100644 index 00000000000..d5d05c8e37a --- /dev/null +++ b/.changelog/5092.bugfix.md @@ -0,0 +1,5 @@ +go/runtime/registry: Fix watching policy updates + +When multiple key managers were running, the last known status of the +runtime's key manager was overwritten with each status update. On runtime +(re)starts, this resulted in the wrong policy being set. diff --git a/go/runtime/registry/host.go b/go/runtime/registry/host.go index 98a427aa2e0..03afe3126d9 100644 --- a/go/runtime/registry/host.go +++ b/go/runtime/registry/host.go @@ -491,12 +491,13 @@ func (n *runtimeHostNotifier) watchPolicyUpdates() { ) continue } - case st = <-stCh: + case newSt := <-stCh: // Ignore status updates if key manager is not yet known (is nil) // or if the status update is for a different key manager. - if rtDsc == nil || !st.ID.Equal(rtDsc.KeyManager) { + if rtDsc == nil || !newSt.ID.Equal(rtDsc.KeyManager) { continue } + st = newSt case ev := <-evCh: // Runtime host changes, make sure to update the policy if runtime is restarted. if ev.Started == nil && ev.Updated == nil { From a67a15904c487c412cce145575d3a87e024c419b Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Mon, 5 Dec 2022 08:51:36 +0100 Subject: [PATCH 6/8] go/runtime/registry: Refactor watching policy updates --- go/common/namespace.go | 5 +- go/runtime/registry/host.go | 138 ++++++++++++++++++++++-------------- 2 files changed, 89 insertions(+), 54 deletions(-) diff --git a/go/common/namespace.go b/go/common/namespace.go index 1b32d93d57d..759f12aa25d 100644 --- a/go/common/namespace.go +++ b/go/common/namespace.go @@ -105,7 +105,10 @@ func (n *Namespace) UnmarshalBase64(text []byte) error { // Equal compares vs another namespace for equality. 
func (n *Namespace) Equal(cmp *Namespace) bool { - if cmp == nil { + if n == cmp { + return true + } + if n == nil || cmp == nil { return false } return bytes.Equal(n[:], cmp[:]) diff --git a/go/runtime/registry/host.go b/go/runtime/registry/host.go index 03afe3126d9..cb98063bafb 100644 --- a/go/runtime/registry/host.go +++ b/go/runtime/registry/host.go @@ -9,6 +9,7 @@ import ( "github.com/eapache/channels" + "github.com/oasisprotocol/oasis-core/go/common" "github.com/oasisprotocol/oasis-core/go/common/cbor" "github.com/oasisprotocol/oasis-core/go/common/identity" "github.com/oasisprotocol/oasis-core/go/common/logging" @@ -444,10 +445,66 @@ func (n *runtimeHostNotifier) watchPolicyUpdates() { } defer dscSub.Close() + var ( + kmRtID *common.Namespace + done bool + ) + + for !done { + done = func() bool { + // Start watching key manager policy updates. + var wg sync.WaitGroup + defer wg.Wait() + + ctx, cancel := context.WithCancel(n.ctx) + defer cancel() + + wg.Add(1) + go func(kmRtID *common.Namespace) { + defer wg.Done() + n.watchKmPolicyUpdates(ctx, kmRtID) + }(kmRtID) + + // Restart the updater if the runtime changes the key manager. This should happen + // at most once as runtimes are not allowed to change the manager once set. + for { + select { + case <-n.ctx.Done(): + n.logger.Debug("context canceled") + return true + case <-n.stopCh: + n.logger.Debug("termination requested") + return true + case rtDsc := <-dscCh: + n.logger.Debug("got registry descriptor update") + + if rtDsc.Kind != registry.KindCompute { + return true + } + + if kmRtID.Equal(rtDsc.KeyManager) { + break + } + + kmRtID = rtDsc.KeyManager + return false + } + } + }() + } +} + +func (n *runtimeHostNotifier) watchKmPolicyUpdates(ctx context.Context, kmRtID *common.Namespace) { + // No need to watch anything if key manager is not set. + if kmRtID == nil { + return + } + + n.logger.Debug("watching key manager policy updates", "keymanager", kmRtID) + // Subscribe to key manager status updates. stCh, stSub := n.consensus.KeyManager().WatchStatuses() defer stSub.Close() - n.logger.Debug("watching policy updates") // Subscribe to runtime host events. evCh, evSub, err := n.host.WatchEvents(n.ctx) @@ -459,76 +516,51 @@ func (n *runtimeHostNotifier) watchPolicyUpdates() { } defer evSub.Close() - var ( - rtDsc *registry.Runtime - st *keymanager.Status - ) + var st *keymanager.Status + for { select { - case <-n.ctx.Done(): - n.logger.Debug("context canceled") + case <-ctx.Done(): return - case <-n.stopCh: - n.logger.Debug("termination requested") - return - case rtDsc = <-dscCh: - n.logger.Debug("got registry descriptor update") - - // Ignore updates if key manager is not needed. - if rtDsc.KeyManager == nil { - n.logger.Debug("no key manager needed for this runtime") - continue - } - - var err error - st, err = n.consensus.KeyManager().GetStatus(n.ctx, ®istry.NamespaceQuery{ - Height: consensus.HeightLatest, - ID: *rtDsc.KeyManager, - }) - if err != nil { - n.logger.Warn("failed to fetch key manager status", - "err", err, - ) - continue - } case newSt := <-stCh: - // Ignore status updates if key manager is not yet known (is nil) - // or if the status update is for a different key manager. - if rtDsc == nil || !newSt.ID.Equal(rtDsc.KeyManager) { + // Ignore status updates for a different key manager. + if !newSt.ID.Equal(kmRtID) { continue } st = newSt + n.updateKeyManagerPolicy(ctx, st.Policy) case ev := <-evCh: // Runtime host changes, make sure to update the policy if runtime is restarted. 
if ev.Started == nil && ev.Updated == nil { continue } + // Make sure that we actually have a policy. + if st != nil { + n.updateKeyManagerPolicy(ctx, st.Policy) + } } + } +} - // Make sure that we actually have a policy. - if st == nil { - continue - } +func (n *runtimeHostNotifier) updateKeyManagerPolicy(ctx context.Context, policy *keymanager.SignedPolicySGX) { + n.logger.Debug("got key manager policy update", "policy", policy) - // Update key manager policy. - n.logger.Debug("got policy update", "status", st) + raw := cbor.Marshal(policy) + req := &protocol.Body{RuntimeKeyManagerPolicyUpdateRequest: &protocol.RuntimeKeyManagerPolicyUpdateRequest{ + SignedPolicyRaw: raw, + }} - raw := cbor.Marshal(st.Policy) - req := &protocol.Body{RuntimeKeyManagerPolicyUpdateRequest: &protocol.RuntimeKeyManagerPolicyUpdateRequest{ - SignedPolicyRaw: raw, - }} + ctx, cancel := context.WithTimeout(ctx, notifyTimeout) + defer cancel() - ctx, cancel := context.WithTimeout(n.ctx, notifyTimeout) - response, err := n.host.Call(ctx, req) - cancel() - if err != nil { - n.logger.Error("failed dispatching key manager policy update to runtime", - "err", err, - ) - continue - } - n.logger.Debug("key manager policy updated dispatched", "response", response) + if _, err := n.host.Call(ctx, req); err != nil { + n.logger.Error("failed dispatching key manager policy update to runtime", + "err", err, + ) + return } + + n.logger.Debug("key manager policy update dispatched") } func (n *runtimeHostNotifier) watchConsensusLightBlocks() { From 52536d29300a0c25b0f8865306fd625d8e727143 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Thu, 8 Dec 2022 02:36:40 +0100 Subject: [PATCH 7/8] runtime/src/enclave_rpc: Verify RPC quotes with key manager quote policy --- .changelog/5092.feature.md | 1 + go/registry/api/runtime.go | 6 ++ go/runtime/host/protocol/types.go | 67 +++++++++-------- go/runtime/registry/host.go | 86 ++++++++++++++++++++-- keymanager/src/client/remote.rs | 30 ++++++-- keymanager/src/crypto/kdf.rs | 3 +- runtime/src/common/sgx/ias.rs | 2 +- runtime/src/common/sgx/mod.rs | 2 +- runtime/src/common/sgx/pcs.rs | 2 +- runtime/src/dispatcher.rs | 41 +++++++++++ runtime/src/enclave_rpc/client.rs | 20 +++-- runtime/src/enclave_rpc/demux.rs | 1 + runtime/src/enclave_rpc/dispatcher.rs | 21 +++++- runtime/src/enclave_rpc/session.rs | 46 ++++++++---- runtime/src/policy.rs | 24 ++++++ runtime/src/protocol.rs | 1 + runtime/src/types.rs | 6 +- tests/runtimes/simple-keyvalue/src/main.rs | 26 ++++--- 18 files changed, 309 insertions(+), 76 deletions(-) create mode 100644 .changelog/5092.feature.md diff --git a/.changelog/5092.feature.md b/.changelog/5092.feature.md new file mode 100644 index 00000000000..644c3e2d2c2 --- /dev/null +++ b/.changelog/5092.feature.md @@ -0,0 +1 @@ +runtime/src/enclave_rpc: Verify RPC quotes with key manager quote policy diff --git a/go/registry/api/runtime.go b/go/registry/api/runtime.go index 82f82f99612..4901388fdf0 100644 --- a/go/registry/api/runtime.go +++ b/go/registry/api/runtime.go @@ -648,6 +648,12 @@ type VersionInfo struct { // Equal compares vs another VersionInfo for equality. 
func (vi *VersionInfo) Equal(cmp *VersionInfo) bool { + if vi == cmp { + return true + } + if vi == nil || cmp == nil { + return false + } if vi.Version.ToU64() != cmp.Version.ToU64() { return false } diff --git a/go/runtime/host/protocol/types.go b/go/runtime/host/protocol/types.go index c55550c75eb..10f31999736 100644 --- a/go/runtime/host/protocol/types.go +++ b/go/runtime/host/protocol/types.go @@ -67,34 +67,36 @@ type Body struct { Error *Error `json:",omitempty"` // Runtime interface. - RuntimeInfoRequest *RuntimeInfoRequest `json:",omitempty"` - RuntimeInfoResponse *RuntimeInfoResponse `json:",omitempty"` - RuntimePingRequest *Empty `json:",omitempty"` - RuntimeShutdownRequest *Empty `json:",omitempty"` - RuntimeCapabilityTEERakInitRequest *RuntimeCapabilityTEERakInitRequest `json:",omitempty"` - RuntimeCapabilityTEERakInitResponse *Empty `json:",omitempty"` - RuntimeCapabilityTEERakReportRequest *Empty `json:",omitempty"` - RuntimeCapabilityTEERakReportResponse *RuntimeCapabilityTEERakReportResponse `json:",omitempty"` - RuntimeCapabilityTEERakAvrRequest *RuntimeCapabilityTEERakAvrRequest `json:",omitempty"` - RuntimeCapabilityTEERakAvrResponse *Empty `json:",omitempty"` - RuntimeCapabilityTEERakQuoteRequest *RuntimeCapabilityTEERakQuoteRequest `json:",omitempty"` - RuntimeCapabilityTEERakQuoteResponse *RuntimeCapabilityTEERakQuoteResponse `json:",omitempty"` - RuntimeRPCCallRequest *RuntimeRPCCallRequest `json:",omitempty"` - RuntimeRPCCallResponse *RuntimeRPCCallResponse `json:",omitempty"` - RuntimeLocalRPCCallRequest *RuntimeLocalRPCCallRequest `json:",omitempty"` - RuntimeLocalRPCCallResponse *RuntimeLocalRPCCallResponse `json:",omitempty"` - RuntimeCheckTxBatchRequest *RuntimeCheckTxBatchRequest `json:",omitempty"` - RuntimeCheckTxBatchResponse *RuntimeCheckTxBatchResponse `json:",omitempty"` - RuntimeExecuteTxBatchRequest *RuntimeExecuteTxBatchRequest `json:",omitempty"` - RuntimeExecuteTxBatchResponse *RuntimeExecuteTxBatchResponse `json:",omitempty"` - RuntimeAbortRequest *Empty `json:",omitempty"` - RuntimeAbortResponse *Empty `json:",omitempty"` - RuntimeKeyManagerPolicyUpdateRequest *RuntimeKeyManagerPolicyUpdateRequest `json:",omitempty"` - RuntimeKeyManagerPolicyUpdateResponse *Empty `json:",omitempty"` - RuntimeQueryRequest *RuntimeQueryRequest `json:",omitempty"` - RuntimeQueryResponse *RuntimeQueryResponse `json:",omitempty"` - RuntimeConsensusSyncRequest *RuntimeConsensusSyncRequest `json:",omitempty"` - RuntimeConsensusSyncResponse *Empty `json:",omitempty"` + RuntimeInfoRequest *RuntimeInfoRequest `json:",omitempty"` + RuntimeInfoResponse *RuntimeInfoResponse `json:",omitempty"` + RuntimePingRequest *Empty `json:",omitempty"` + RuntimeShutdownRequest *Empty `json:",omitempty"` + RuntimeCapabilityTEERakInitRequest *RuntimeCapabilityTEERakInitRequest `json:",omitempty"` + RuntimeCapabilityTEERakInitResponse *Empty `json:",omitempty"` + RuntimeCapabilityTEERakReportRequest *Empty `json:",omitempty"` + RuntimeCapabilityTEERakReportResponse *RuntimeCapabilityTEERakReportResponse `json:",omitempty"` + RuntimeCapabilityTEERakAvrRequest *RuntimeCapabilityTEERakAvrRequest `json:",omitempty"` + RuntimeCapabilityTEERakAvrResponse *Empty `json:",omitempty"` + RuntimeCapabilityTEERakQuoteRequest *RuntimeCapabilityTEERakQuoteRequest `json:",omitempty"` + RuntimeCapabilityTEERakQuoteResponse *RuntimeCapabilityTEERakQuoteResponse `json:",omitempty"` + RuntimeRPCCallRequest *RuntimeRPCCallRequest `json:",omitempty"` + RuntimeRPCCallResponse *RuntimeRPCCallResponse 
`json:",omitempty"` + RuntimeLocalRPCCallRequest *RuntimeLocalRPCCallRequest `json:",omitempty"` + RuntimeLocalRPCCallResponse *RuntimeLocalRPCCallResponse `json:",omitempty"` + RuntimeCheckTxBatchRequest *RuntimeCheckTxBatchRequest `json:",omitempty"` + RuntimeCheckTxBatchResponse *RuntimeCheckTxBatchResponse `json:",omitempty"` + RuntimeExecuteTxBatchRequest *RuntimeExecuteTxBatchRequest `json:",omitempty"` + RuntimeExecuteTxBatchResponse *RuntimeExecuteTxBatchResponse `json:",omitempty"` + RuntimeAbortRequest *Empty `json:",omitempty"` + RuntimeAbortResponse *Empty `json:",omitempty"` + RuntimeKeyManagerPolicyUpdateRequest *RuntimeKeyManagerPolicyUpdateRequest `json:",omitempty"` + RuntimeKeyManagerPolicyUpdateResponse *Empty `json:",omitempty"` + RuntimeKeyManagerQuotePolicyUpdateRequest *RuntimeKeyManagerQuotePolicyUpdateRequest `json:",omitempty"` + RuntimeKeyManagerQuotePolicyUpdateResponse *Empty `json:",omitempty"` + RuntimeQueryRequest *RuntimeQueryRequest `json:",omitempty"` + RuntimeQueryResponse *RuntimeQueryResponse `json:",omitempty"` + RuntimeConsensusSyncRequest *RuntimeConsensusSyncRequest `json:",omitempty"` + RuntimeConsensusSyncResponse *Empty `json:",omitempty"` // Host interface. HostRPCCallRequest *HostRPCCallRequest `json:",omitempty"` @@ -395,12 +397,17 @@ type RuntimeExecuteTxBatchResponse struct { Deprecated1 cbor.RawMessage `json:"batch_weight_limits,omitempty"` } -// RuntimeKeyManagerPolicyUpdateRequest is a runtime key manager policy request -// message body. +// RuntimeKeyManagerPolicyUpdateRequest is a runtime key manager policy request message body. type RuntimeKeyManagerPolicyUpdateRequest struct { SignedPolicyRaw []byte `json:"signed_policy_raw"` } +// RuntimeKeyManagerQuotePolicyUpdateRequest is a runtime key manager quote policy request +// message body. +type RuntimeKeyManagerQuotePolicyUpdateRequest struct { + Policy quote.Policy `json:"policy"` +} + // RuntimeQueryRequest is a runtime query request message body. type RuntimeQueryRequest struct { // ConsensusBlock is the consensus light block at the last finalized round diff --git a/go/runtime/registry/host.go b/go/runtime/registry/host.go index cb98063bafb..43842b047d0 100644 --- a/go/runtime/registry/host.go +++ b/go/runtime/registry/host.go @@ -13,6 +13,8 @@ import ( "github.com/oasisprotocol/oasis-core/go/common/cbor" "github.com/oasisprotocol/oasis-core/go/common/identity" "github.com/oasisprotocol/oasis-core/go/common/logging" + "github.com/oasisprotocol/oasis-core/go/common/node" + "github.com/oasisprotocol/oasis-core/go/common/sgx/quote" "github.com/oasisprotocol/oasis-core/go/common/version" consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" consensusResults "github.com/oasisprotocol/oasis-core/go/consensus/api/transaction/results" @@ -502,11 +504,21 @@ func (n *runtimeHostNotifier) watchKmPolicyUpdates(ctx context.Context, kmRtID * n.logger.Debug("watching key manager policy updates", "keymanager", kmRtID) - // Subscribe to key manager status updates. + // Subscribe to key manager status updates (policy might change). stCh, stSub := n.consensus.KeyManager().WatchStatuses() defer stSub.Close() - // Subscribe to runtime host events. + // Subscribe to epoch transitions (quote policy might change). + epoCh, sub, err := n.consensus.Beacon().WatchEpochs(ctx) + if err != nil { + n.logger.Error("failed to watch epochs", + "err", err, + ) + return + } + defer sub.Close() + + // Subscribe to runtime host events (policies will be lost on restarts). 
evCh, evSub, err := n.host.WatchEvents(n.ctx)
 	if err != nil {
 		n.logger.Error("failed to subscribe to runtime host events",
@@ -516,7 +528,11 @@ func (n *runtimeHostNotifier) watchKmPolicyUpdates(ctx context.Context, kmRtID *
 	}
 	defer evSub.Close()
 
-	var st *keymanager.Status
+	var (
+		st *keymanager.Status
+		sc *node.SGXConstraints
+		vi *registry.VersionInfo
+	)
 
 	for {
 		select {
@@ -528,16 +544,57 @@ func (n *runtimeHostNotifier) watchKmPolicyUpdates(ctx context.Context, kmRtID *
 				continue
 			}
 			st = newSt
+			n.updateKeyManagerPolicy(ctx, st.Policy)
+		case epoch := <-epoCh:
+			// Check if the key manager was redeployed, as that is when a new quote policy might
+			// take effect.
+			dsc, err := n.consensus.Registry().GetRuntime(ctx, &registry.GetRuntimeQuery{
+				Height: consensus.HeightLatest,
+				ID: *kmRtID,
+			})
+			if err != nil {
+				n.logger.Error("failed to query key manager runtime descriptor",
+					"err", err,
+				)
+				continue
+			}
+
+			// Quote policies can only be set on SGX hardware.
+			if dsc.TEEHardware != node.TEEHardwareIntelSGX {
+				continue
+			}
+
+			// No need to update the policy if the key manager is still running the same version.
+			newVi := dsc.ActiveDeployment(epoch)
+			if newVi.Equal(vi) {
+				continue
+			}
+			vi = newVi
+
+			// Parse SGX constraints.
+			var newSc node.SGXConstraints
+			if err := cbor.Unmarshal(vi.TEE, &newSc); err != nil {
+				n.logger.Error("malformed SGX constraints",
+					"err", err,
+				)
+				continue
+			}
+			sc = &newSc
+
+			n.updateKeyManagerQuotePolicy(ctx, sc.Policy)
 		case ev := <-evCh:
-			// Runtime host changes, make sure to update the policy if runtime is restarted.
+			// Runtime host changes, make sure to update the policies if runtime is restarted.
 			if ev.Started == nil && ev.Updated == nil {
 				continue
 			}
-			// Make sure that we actually have a policy.
+			// Make sure that we actually have policies. 
if st != nil { n.updateKeyManagerPolicy(ctx, st.Policy) } + if sc != nil { + n.updateKeyManagerQuotePolicy(ctx, sc.Policy) + } } } } @@ -563,6 +620,25 @@ func (n *runtimeHostNotifier) updateKeyManagerPolicy(ctx context.Context, policy n.logger.Debug("key manager policy update dispatched") } +func (n *runtimeHostNotifier) updateKeyManagerQuotePolicy(ctx context.Context, policy *quote.Policy) { + n.logger.Debug("got key manager quote policy update", "policy", policy) + + req := &protocol.Body{RuntimeKeyManagerQuotePolicyUpdateRequest: &protocol.RuntimeKeyManagerQuotePolicyUpdateRequest{ + Policy: *policy, + }} + + ctx, cancel := context.WithTimeout(ctx, notifyTimeout) + defer cancel() + + if _, err := n.host.Call(ctx, req); err != nil { + n.logger.Error("failed dispatching key manager quote policy update to runtime", + "err", err, + ) + return + } + n.logger.Debug("key manager quote policy update dispatched") +} + func (n *runtimeHostNotifier) watchConsensusLightBlocks() { rawCh, sub, err := n.consensus.WatchBlocks(n.ctx) if err != nil { diff --git a/keymanager/src/client/remote.rs b/keymanager/src/client/remote.rs index e727f948d9a..973ddae8e6a 100644 --- a/keymanager/src/client/remote.rs +++ b/keymanager/src/client/remote.rs @@ -11,7 +11,10 @@ use io_context::Context; use lru::LruCache; use oasis_core_runtime::{ - common::{namespace::Namespace, sgx::EnclaveIdentity}, + common::{ + namespace::Namespace, + sgx::{EnclaveIdentity, QuotePolicy}, + }, consensus::{beacon::EpochTime, keymanager::SignedPolicySGX, verifier::Verifier}, enclave_rpc::{client::RpcClient, session}, protocol::Protocol, @@ -76,10 +79,11 @@ impl RemoteClient { } /// Create a new key manager client with runtime-internal transport and explicit key manager - /// enclave identities. - pub fn new_runtime_with_enclave_identities( + /// enclave identities and quote policy. + pub fn new_runtime_with_enclaves_and_policy( runtime_id: Namespace, enclaves: Option>, + policy: Option>, protocol: Arc, consensus_verifier: Arc, rak: Arc, @@ -90,6 +94,7 @@ impl RemoteClient { RpcClient::new_runtime( session::Builder::default() .remote_enclaves(enclaves) + .quote_policy(policy) .local_rak(rak), protocol, KEY_MANAGER_ENDPOINT, @@ -101,9 +106,10 @@ impl RemoteClient { /// Create a new key manager client with runtime-internal transport. /// - /// Using this method valid enclave identities won't be preset and should be obtained via the - /// worker-host protocol and updated with the set_policy method. In case the signer set is - /// non-empty, session establishment will fail until the initial policies will be updated. + /// Using this method valid enclave identities and quote policy won't be preset and should + /// be obtained via the runtime-host protocol and updated with the `set_policy` and + /// `set_quote_policy` methods. In case the signer set is non-empty, session establishment + /// will fail until the initial policies will be updated. pub fn new_runtime( runtime_id: Namespace, protocol: Arc, @@ -128,12 +134,17 @@ impl RemoteClient { None }; + // Key manager's quote policy should be obtained via the runtime-host protocol. Until then, + // all quote verifications will fail with a missing quote policy error. + let policy = None; + // Configure trusted policy signers. 
set_trusted_policy_signers(signers); - Self::new_runtime_with_enclave_identities( + Self::new_runtime_with_enclaves_and_policy( runtime_id, enclaves, + policy, protocol, consensus_verifier, rak, @@ -151,6 +162,11 @@ impl RemoteClient { Ok(()) } + + /// Set key manager's quote policy. + pub fn set_quote_policy(&self, policy: QuotePolicy) { + self.inner.rpc_client.update_quote_policy(policy); + } } impl KeyManagerClient for RemoteClient { diff --git a/keymanager/src/crypto/kdf.rs b/keymanager/src/crypto/kdf.rs index 921505f5159..49168faabbf 100644 --- a/keymanager/src/crypto/kdf.rs +++ b/keymanager/src/crypto/kdf.rs @@ -298,9 +298,10 @@ impl Kdf { let rctx = runtime_context!(ctx, KmContext); - let km_client = RemoteClient::new_runtime_with_enclave_identities( + let km_client = RemoteClient::new_runtime_with_enclaves_and_policy( rctx.runtime_id, Policy::global().may_replicate_from(), + ctx.rak.quote_policy(), rctx.protocol.clone(), ctx.consensus_verifier.clone(), ctx.rak.clone(), diff --git a/runtime/src/common/sgx/ias.rs b/runtime/src/common/sgx/ias.rs index d07c4a55b20..19fdd7e5b58 100644 --- a/runtime/src/common/sgx/ias.rs +++ b/runtime/src/common/sgx/ias.rs @@ -116,7 +116,7 @@ lazy_static! { } /// Quote validity policy. -#[derive(Clone, Debug, Default, cbor::Encode, cbor::Decode)] +#[derive(Clone, Debug, Default, PartialEq, Eq, cbor::Encode, cbor::Decode)] pub struct QuotePolicy { /// Whether IAS quotes are disabled and will always be rejected. #[cbor(optional)] diff --git a/runtime/src/common/sgx/mod.rs b/runtime/src/common/sgx/mod.rs index 22600958319..f490240b625 100644 --- a/runtime/src/common/sgx/mod.rs +++ b/runtime/src/common/sgx/mod.rs @@ -96,7 +96,7 @@ impl Quote { } /// Quote validity policy. -#[derive(Clone, Debug, Default, cbor::Encode, cbor::Decode)] +#[derive(Clone, Debug, Default, PartialEq, Eq, cbor::Encode, cbor::Decode)] pub struct QuotePolicy { #[cbor(rename = "ias")] pub ias: Option, diff --git a/runtime/src/common/sgx/pcs.rs b/runtime/src/common/sgx/pcs.rs index 3b04379d824..e5a423848fe 100644 --- a/runtime/src/common/sgx/pcs.rs +++ b/runtime/src/common/sgx/pcs.rs @@ -97,7 +97,7 @@ pub enum Error { } /// Quote validity policy. -#[derive(Clone, Debug, cbor::Encode, cbor::Decode)] +#[derive(Clone, Debug, PartialEq, Eq, cbor::Encode, cbor::Decode)] pub struct QuotePolicy { /// Whether PCS quotes are disabled and will always be rejected. #[cbor(optional)] diff --git a/runtime/src/dispatcher.rs b/runtime/src/dispatcher.rs index 214acf9d18d..027514f6775 100644 --- a/runtime/src/dispatcher.rs +++ b/runtime/src/dispatcher.rs @@ -22,6 +22,7 @@ use crate::{ signature::{Signature, Signer}, }, logger::get_logger, + sgx::QuotePolicy, }, consensus::{ beacon::EpochTime, @@ -453,6 +454,12 @@ impl Dispatcher { // Key manager policy update local RPC call. self.handle_km_policy_update(ctx, state, signed_policy_raw) } + Body::RuntimeKeyManagerQuotePolicyUpdateRequest { + policy: quote_policy, + } => { + // Key manager quote policy update local RPC call. + self.handle_km_quote_policy_update(ctx, state, quote_policy) + } Body::RuntimeConsensusSyncRequest { height } => state .consensus_verifier .sync(height) @@ -1009,4 +1016,38 @@ impl Dispatcher { Ok(Body::RuntimeKeyManagerPolicyUpdateResponse {}) } + + fn handle_km_quote_policy_update( + &self, + ctx: Context, + state: State, + quote_policy: QuotePolicy, + ) -> Result { + // Make sure to abort the process on panic during quote policy processing as that indicates + // a serious problem and should make sure to clean up the process. 
+ let _guard = AbortOnPanic; + + debug!(self.logger, "Received km quote policy update request"); + + // Verify and decode the policy. + let ctx = ctx.freeze(); + let runtime_id = state.protocol.get_host_info().runtime_id; + let key_manager = state + .policy_verifier + .key_manager(ctx.clone(), &runtime_id, true)?; + let policy = state.policy_verifier.verify_quote_policy( + ctx, + quote_policy, + &key_manager, + None, + false, + )?; + + // Dispatch the local RPC call. + state.rpc_dispatcher.handle_km_quote_policy_update(policy); + + debug!(self.logger, "KM quote policy update request complete"); + + Ok(Body::RuntimeKeyManagerQuotePolicyUpdateResponse {}) + } } diff --git a/runtime/src/enclave_rpc/client.rs b/runtime/src/enclave_rpc/client.rs index f41cdce10c5..c1166185513 100644 --- a/runtime/src/enclave_rpc/client.rs +++ b/runtime/src/enclave_rpc/client.rs @@ -1,6 +1,7 @@ //! Enclave RPC client. use std::{ collections::HashSet, + mem, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, @@ -18,7 +19,7 @@ use tokio; use crate::{ cbor, - common::sgx::EnclaveIdentity, + common::sgx::{EnclaveIdentity, QuotePolicy}, enclave_rpc::{ session::{Builder, Session}, types, @@ -356,13 +357,20 @@ impl RpcClient { } } - /// Update session enclaves if changed. + /// Update allowed remote enclave identities. + /// + /// Useful if the key manager's policy has changed. pub fn update_enclaves(&self, enclaves: Option>) { let mut session = self.inner.session.lock().unwrap(); - if session.builder.get_remote_enclaves() != &enclaves { - session.builder = session.builder.clone().remote_enclaves(enclaves); - session.reset(); - } + session.builder = mem::take(&mut session.builder).remote_enclaves(enclaves); + session.reset(); + } + + /// Update key manager's quote policy. + pub fn update_quote_policy(&self, policy: QuotePolicy) { + let mut session = self.inner.session.lock().unwrap(); + session.builder = mem::take(&mut session.builder).quote_policy(Some(Arc::new(policy))); + session.reset(); } } diff --git a/runtime/src/enclave_rpc/demux.rs b/runtime/src/enclave_rpc/demux.rs index d46a3e0314f..de03a932776 100644 --- a/runtime/src/enclave_rpc/demux.rs +++ b/runtime/src/enclave_rpc/demux.rs @@ -135,6 +135,7 @@ impl Demux { // Create a new session. if self.sessions.len() < self.max_concurrent_sessions { let mut session = Builder::default() + .quote_policy(self.rak.quote_policy()) .local_rak(self.rak.clone()) .build_responder(); let result = match session.process_data(frame.payload, writer).map(|m| { diff --git a/runtime/src/enclave_rpc/dispatcher.rs b/runtime/src/enclave_rpc/dispatcher.rs index d289f693f43..0cbf581e579 100644 --- a/runtime/src/enclave_rpc/dispatcher.rs +++ b/runtime/src/enclave_rpc/dispatcher.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use anyhow::Result; use thiserror::Error; -use crate::consensus::keymanager::SignedPolicySGX; +use crate::{common::sgx::QuotePolicy, consensus::keymanager::SignedPolicySGX}; use super::{ context::Context, @@ -127,6 +127,8 @@ impl Method { /// Key manager policy update handler callback. pub type KeyManagerPolicyHandler = dyn Fn(SignedPolicySGX) + Send + Sync; +/// Key manager quote policy update handler callback. +pub type KeyManagerQuotePolicyHandler = dyn Fn(QuotePolicy) + Send + Sync; /// RPC call dispatcher. #[derive(Default)] @@ -137,6 +139,8 @@ pub struct Dispatcher { local_methods: HashMap, /// Registered key manager policy handler. km_policy_handler: Option>, + /// Registered key manager quote policy handler. 
+ km_quote_policy_handler: Option>, /// Registered context initializer. ctx_initializer: Option>, } @@ -217,6 +221,13 @@ impl Dispatcher { } } + /// Handle key manager quote policy update. + pub fn handle_km_quote_policy_update(&self, policy: QuotePolicy) { + if let Some(handler) = self.km_quote_policy_handler.as_ref() { + handler(policy) + } + } + /// Update key manager policy update handler. pub fn set_keymanager_policy_update_handler( &mut self, @@ -224,4 +235,12 @@ impl Dispatcher { ) { self.km_policy_handler = f; } + + /// Update key manager quote policy update handler. + pub fn set_keymanager_quote_policy_update_handler( + &mut self, + f: Option>, + ) { + self.km_quote_policy_handler = f; + } } diff --git a/runtime/src/enclave_rpc/session.rs b/runtime/src/enclave_rpc/session.rs index 7e1fa1ba194..2be689ba669 100644 --- a/runtime/src/enclave_rpc/session.rs +++ b/runtime/src/enclave_rpc/session.rs @@ -9,7 +9,7 @@ use super::types::Message; use crate::{ common::{ crypto::signature::{PublicKey, Signature, Signer}, - sgx::{ias, EnclaveIdentity, Quote, VerifiedQuote}, + sgx::{ias, EnclaveIdentity, Quote, QuotePolicy, VerifiedQuote}, }, rak::RAK, }; @@ -30,6 +30,8 @@ enum SessionError { Closed, #[error("mismatched enclave identity")] MismatchedEnclaveIdentity, + #[error("missing quote policy")] + MissingQuotePolicy, } /// Information about a session. @@ -50,6 +52,7 @@ pub struct Session { local_static_pub: Vec, rak: Option>, remote_enclaves: Option>, + policy: Option>, info: Option>, state: State, buf: Vec, @@ -61,11 +64,13 @@ impl Session { local_static_pub: Vec, rak: Option>, remote_enclaves: Option>, + policy: Option>, ) -> Self { Self { local_static_pub, rak, remote_enclaves, + policy, info: None, state: State::Handshake1(handshake_state), buf: vec![0u8; 65535], @@ -218,8 +223,13 @@ impl Session { return Ok(None); } + let policy = self + .policy + .as_ref() + .ok_or(SessionError::MissingQuotePolicy)?; + let rak_binding: RAKBinding = cbor::from_slice(rak_binding)?; - let verified_quote = rak_binding.verify(&self.remote_enclaves, remote_static)?; + let verified_quote = rak_binding.verify(remote_static, &self.remote_enclaves, policy)?; Ok(Some(Arc::new(SessionInfo { rak_binding, @@ -294,10 +304,11 @@ impl RAKBinding { /// Verify the RAK binding. pub fn verify( &self, - remote_enclaves: &Option>, remote_static: &[u8], + remote_enclaves: &Option>, + policy: &QuotePolicy, ) -> Result { - let verified_quote = self.verify_quote()?; + let verified_quote = self.verify_quote(policy)?; // Verify MRENCLAVE/MRSIGNER. if let Some(ref remote_enclaves) = remote_enclaves { @@ -317,10 +328,10 @@ impl RAKBinding { } /// Verify the quote that is part of the RAK binding. - pub fn verify_quote(&self) -> Result { + pub fn verify_quote(&self, policy: &QuotePolicy) -> Result { match self { - Self::V0 { ref avr, .. } => ias::verify(avr, &Default::default()), // TODO: Add policy. - Self::V1 { ref quote, .. } => quote.verify(&Default::default()), // TODO: Add policy. + Self::V0 { ref avr, .. } => ias::verify(avr, &policy.ias.clone().unwrap_or_default()), + Self::V1 { ref quote, .. } => quote.verify(policy), } } } @@ -330,6 +341,7 @@ impl RAKBinding { pub struct Builder { rak: Option>, remote_enclaves: Option>, + policy: Option>, } impl Builder { @@ -340,17 +352,23 @@ impl Builder { /// Enable remote enclave identity verification. pub fn remote_enclaves(mut self, enclaves: Option>) -> Self { - // TODO: Also add quote_policy argument. 
self.remote_enclaves = enclaves; self } + /// Configure quote policy used for remote quote verification. + pub fn quote_policy(mut self, policy: Option>) -> Self { + self.policy = policy; + self + } + /// Enable RAK binding. pub fn local_rak(mut self, rak: Arc) -> Self { self.rak = Some(rak); self } + #[allow(clippy::type_complexity)] fn build<'a>( mut self, ) -> ( @@ -358,32 +376,34 @@ impl Builder { snow::Keypair, Option>, Option>, + Option>, ) { let noise_builder = snow::Builder::new(NOISE_PATTERN.parse().unwrap()); let rak = self.rak.take(); let remote_enclaves = self.remote_enclaves.take(); + let quote_policy = self.policy.take(); let keypair = noise_builder.generate_keypair().unwrap(); - (noise_builder, keypair, rak, remote_enclaves) + (noise_builder, keypair, rak, remote_enclaves, quote_policy) } /// Build initiator session. pub fn build_initiator(self) -> Session { - let (builder, keypair, rak, enclaves) = self.build(); + let (builder, keypair, rak, enclaves, policy) = self.build(); let session = builder .local_private_key(&keypair.private) .build_initiator() .unwrap(); - Session::new(session, keypair.public, rak, enclaves) + Session::new(session, keypair.public, rak, enclaves, policy) } /// Build responder session. pub fn build_responder(self) -> Session { - let (builder, keypair, rak, enclaves) = self.build(); + let (builder, keypair, rak, enclaves, policy) = self.build(); let session = builder .local_private_key(&keypair.private) .build_responder() .unwrap(); - Session::new(session, keypair.public, rak, enclaves) + Session::new(session, keypair.public, rak, enclaves, policy) } } diff --git a/runtime/src/policy.rs b/runtime/src/policy.rs index 4341d423ee7..d3aced8908d 100644 --- a/runtime/src/policy.rs +++ b/runtime/src/policy.rs @@ -104,6 +104,30 @@ impl PolicyVerifier { Ok(policy) } + /// Verify that runtime's quote policy has been published in the consensus layer. + pub fn verify_quote_policy( + &self, + ctx: Arc, + policy: QuotePolicy, + runtime_id: &Namespace, + version: Option, + use_latest_state: bool, + ) -> Result { + let published_policy = self.quote_policy(ctx, runtime_id, version, use_latest_state)?; + + if policy != published_policy { + debug!( + self.logger, + "quote policy mismatch"; + "untrusted" => ?policy, + "published" => ?published_policy, + ); + return Err(PolicyVerifierError::PolicyNotPublished.into()); + } + + Ok(published_policy) + } + /// Fetch key manager's policy from the latest verified consensus layer state. pub fn key_manager_policy( &self, diff --git a/runtime/src/protocol.rs b/runtime/src/protocol.rs index b9f456851c4..6455e997fd9 100644 --- a/runtime/src/protocol.rs +++ b/runtime/src/protocol.rs @@ -380,6 +380,7 @@ impl Protocol { | Body::RuntimeCheckTxBatchRequest { .. } | Body::RuntimeExecuteTxBatchRequest { .. } | Body::RuntimeKeyManagerPolicyUpdateRequest { .. } + | Body::RuntimeKeyManagerQuotePolicyUpdateRequest { .. } | Body::RuntimeQueryRequest { .. } | Body::RuntimeConsensusSyncRequest { .. 
                 self.ensure_initialized()?;
diff --git a/runtime/src/types.rs b/runtime/src/types.rs
index b686a6db31d..be43af1b6ff 100644
--- a/runtime/src/types.rs
+++ b/runtime/src/types.rs
@@ -10,7 +10,7 @@ use crate::{
             signature::{PublicKey, Signature},
         },
         namespace::Namespace,
-        sgx::{ias::AVR, Quote},
+        sgx::{ias::AVR, Quote, QuotePolicy},
         version::Version,
     },
     consensus::{
@@ -187,6 +187,10 @@ pub enum Body {
         signed_policy_raw: Vec<u8>,
     },
     RuntimeKeyManagerPolicyUpdateResponse {},
+    RuntimeKeyManagerQuotePolicyUpdateRequest {
+        policy: QuotePolicy,
+    },
+    RuntimeKeyManagerQuotePolicyUpdateResponse {},
     RuntimeQueryRequest {
         consensus_block: LightBlock,
         header: Header,
diff --git a/tests/runtimes/simple-keyvalue/src/main.rs b/tests/runtimes/simple-keyvalue/src/main.rs
index 9fd1e9f6d7b..b157655e80b 100644
--- a/tests/runtimes/simple-keyvalue/src/main.rs
+++ b/tests/runtimes/simple-keyvalue/src/main.rs
@@ -376,15 +376,23 @@ pub fn main_with_version(version: Version) {
     ));

     #[cfg(target_env = "sgx")]
-    let key_manager = km_client.clone();
-    #[cfg(target_env = "sgx")]
-    state
-        .rpc_dispatcher
-        .set_keymanager_policy_update_handler(Some(Box::new(move |policy| {
-            key_manager
-                .set_policy(policy)
-                .expect("failed to update km client policy");
-        })));
+    {
+        let key_manager = km_client.clone();
+        state
+            .rpc_dispatcher
+            .set_keymanager_policy_update_handler(Some(Box::new(move |policy| {
+                key_manager
+                    .set_policy(policy)
+                    .expect("failed to update km client policy");
+            })));
+
+        let key_manager = km_client.clone();
+        state
+            .rpc_dispatcher
+            .set_keymanager_quote_policy_update_handler(Some(Box::new(move |policy| {
+                key_manager.set_quote_policy(policy);
+            })));
+    }

     let dispatcher = Dispatcher::new(hi, km_client, state.consensus_verifier.clone());


From d9ed76be15af85d4118feb70aa8a55d38862959d Mon Sep 17 00:00:00 2001
From: Peter Nose
Date: Mon, 12 Dec 2022 01:33:53 +0100
Subject: [PATCH 8/8] go/runtime/registry/host: Add key manager quote policy updates feature

---
 go/runtime/host/protocol/types.go          |  3 +++
 go/runtime/registry/host.go                | 14 ++++++++++++++
 runtime/src/types.rs                       | 14 +++++++++++++-
 tests/runtimes/simple-keyvalue/src/main.rs |  1 +
 4 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/go/runtime/host/protocol/types.go b/go/runtime/host/protocol/types.go
index 10f31999736..d26b3e45e35 100644
--- a/go/runtime/host/protocol/types.go
+++ b/go/runtime/host/protocol/types.go
@@ -171,6 +171,9 @@ type RuntimeInfoRequest struct {
 type Features struct {
	// ScheduleControl is the schedule control feature.
	ScheduleControl *FeatureScheduleControl `json:"schedule_control,omitempty"`
+	// KeyManagerQuotePolicyUpdates is a feature specifying that the runtime supports updating
+	// the key manager's quote policy.
+	KeyManagerQuotePolicyUpdates bool `json:"key_manager_quote_policy_updates,omitempty"`
 }

 // HasScheduleControl returns true when the runtime supports the schedule control feature.
diff --git a/go/runtime/registry/host.go b/go/runtime/registry/host.go
index 43842b047d0..cfd20be7a9a 100644
--- a/go/runtime/registry/host.go
+++ b/go/runtime/registry/host.go
@@ -528,6 +528,15 @@ func (n *runtimeHostNotifier) watchKmPolicyUpdates(ctx context.Context, kmRtID *
	}
	defer evSub.Close()

+	// Fetch runtime info so that we know which features the runtime supports.
+	rtInfo, err := n.host.GetInfo(ctx)
+	if err != nil {
+		n.logger.Error("failed to fetch runtime info",
+			"err", err,
+		)
+		return
+	}
+
	var (
		st *keymanager.Status
		sc *node.SGXConstraints
@@ -547,6 +556,11 @@ func (n *runtimeHostNotifier) watchKmPolicyUpdates(ctx context.Context, kmRtID *
			n.updateKeyManagerPolicy(ctx, st.Policy)

		case epoch := <-epoCh:
+			// Skip quote policy updates if the runtime doesn't support them.
+			if !rtInfo.Features.KeyManagerQuotePolicyUpdates {
+				continue
+			}
+
			// Check if the key manager was redeployed, as that is when a new quote policy might
			// take effect.
			dsc, err := n.consensus.Registry().GetRuntime(ctx, &registry.GetRuntimeQuery{
diff --git a/runtime/src/types.rs b/runtime/src/types.rs
index be43af1b6ff..d038bdb2f99 100644
--- a/runtime/src/types.rs
+++ b/runtime/src/types.rs
@@ -320,11 +320,23 @@ pub struct RuntimeInfoRequest {
 }

 /// Set of supported runtime features.
-#[derive(Clone, Debug, Default, cbor::Encode, cbor::Decode)]
+#[derive(Clone, Debug, cbor::Encode, cbor::Decode)]
 pub struct Features {
     /// Schedule control feature.
     #[cbor(optional)]
     pub schedule_control: Option<FeatureScheduleControl>,
+    /// A feature specifying that the runtime supports updating the key manager's quote policy.
+    #[cbor(optional)]
+    pub key_manager_quote_policy_updates: bool,
+}
+
+impl Default for Features {
+    fn default() -> Self {
+        Self {
+            schedule_control: None,
+            key_manager_quote_policy_updates: true,
+        }
+    }
 }

 /// A feature specifying that the runtime supports controlling the scheduling of batches. This means
diff --git a/tests/runtimes/simple-keyvalue/src/main.rs b/tests/runtimes/simple-keyvalue/src/main.rs
index b157655e80b..df81c31b0c2 100644
--- a/tests/runtimes/simple-keyvalue/src/main.rs
+++ b/tests/runtimes/simple-keyvalue/src/main.rs
@@ -426,6 +426,7 @@ pub fn main_with_version(version: Version) {
             schedule_control: Some(FeatureScheduleControl {
                 initial_batch_size: MAX_BATCH_SIZE.try_into().unwrap(),
             }),
+            ..Default::default()
         }),
         freshness_proofs: true,
         ..Default::default()