From 9a9b2a03e3710465d684e44f2f6a123838c675cc Mon Sep 17 00:00:00 2001
From: Mohsen Zohrevandi
Date: Fri, 2 Oct 2020 11:33:31 -0700
Subject: [PATCH 01/22] Async usercall interface for SGX enclaves

---
 .travis.yml                                   |   3 +-
 Cargo.lock                                    |  18 +
 Cargo.toml                                    |   1 +
 async-usercalls/Cargo.toml                    |  31 ++
 async-usercalls/rustfmt.toml                  |   1 +
 async-usercalls/src/alloc/allocator.rs        | 145 ++++++++
 async-usercalls/src/alloc/bitmap.rs           | 156 +++++++++
 async-usercalls/src/alloc/io_bufs.rs          | 260 ++++++++++++++
 async-usercalls/src/alloc/mod.rs              |  69 ++++
 async-usercalls/src/alloc/slab.rs             | 198 +++++++++++
 async-usercalls/src/alloc/tests.rs            | 323 ++++++++++++++++++
 async-usercalls/src/batch_drop.rs             | 127 +++++++
 async-usercalls/src/callback.rs               |  89 +++++
 async-usercalls/src/duplicated.rs             | 168 +++++++++
 async-usercalls/src/hacks/async_queues.rs     |  50 +++
 async-usercalls/src/hacks/mod.rs              |  61 ++++
 async-usercalls/src/hacks/unsafe_typecasts.rs |  95 ++++++
 async-usercalls/src/lib.rs                    | 165 +++++++++
 async-usercalls/src/provider_api.rs           | 274 +++++++++++++++
 async-usercalls/src/provider_core.rs          |  69 ++++
 async-usercalls/src/queues.rs                 | 188 ++++++++++
 async-usercalls/src/raw.rs                    | 155 +++++++++
 async-usercalls/src/tests.rs                  | 251 ++++++++++++++
 async-usercalls/test.sh                       |  14 +
 24 files changed, 2910 insertions(+), 1 deletion(-)
 create mode 100644 async-usercalls/Cargo.toml
 create mode 100644 async-usercalls/rustfmt.toml
 create mode 100644 async-usercalls/src/alloc/allocator.rs
 create mode 100644 async-usercalls/src/alloc/bitmap.rs
 create mode 100644 async-usercalls/src/alloc/io_bufs.rs
 create mode 100644 async-usercalls/src/alloc/mod.rs
 create mode 100644 async-usercalls/src/alloc/slab.rs
 create mode 100644 async-usercalls/src/alloc/tests.rs
 create mode 100644 async-usercalls/src/batch_drop.rs
 create mode 100644 async-usercalls/src/callback.rs
 create mode 100644 async-usercalls/src/duplicated.rs
 create mode 100644 async-usercalls/src/hacks/async_queues.rs
 create mode 100644 async-usercalls/src/hacks/mod.rs
 create mode 100644 async-usercalls/src/hacks/unsafe_typecasts.rs
 create mode 100644 async-usercalls/src/lib.rs
 create mode 100644 async-usercalls/src/provider_api.rs
 create mode 100644 async-usercalls/src/provider_core.rs
 create mode 100644 async-usercalls/src/queues.rs
 create mode 100644 async-usercalls/src/raw.rs
 create mode 100644 async-usercalls/src/tests.rs
 create mode 100755 async-usercalls/test.sh

diff --git a/.travis.yml b/.travis.yml
index 5da799f6..db315677 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,7 +29,8 @@ matrix:
       before_script:
         - rustup target add x86_64-fortanix-unknown-sgx x86_64-unknown-linux-musl
       script:
-        - cargo test --verbose --all --exclude sgxs-loaders && [ "$(echo $(nm -D target/debug/sgx-detect|grep __vdso_sgx_enter_enclave))" = "w __vdso_sgx_enter_enclave" ]
+        - cargo test --verbose --all --exclude sgxs-loaders --exclude async-usercalls && [ "$(echo $(nm -D target/debug/sgx-detect|grep __vdso_sgx_enter_enclave))" = "w __vdso_sgx_enter_enclave" ]
+        - cargo test --verbose -p async-usercalls --target x86_64-fortanix-unknown-sgx --no-run
         - cargo test --verbose -p sgx-isa --features sgxstd --target x86_64-fortanix-unknown-sgx --no-run
        - cargo test --verbose -p sgxs-tools --features pe2sgxs --bin isgx-pe2sgx
        - cargo test --verbose -p dcap-ql --features link
diff --git a/Cargo.lock b/Cargo.lock
index 759a64b0..2c4b08d7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -67,6 +67,18 @@ version = "0.4.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034"
 
+[[package]]
+name = "async-usercalls"
+version = "0.1.0"
+dependencies = [
+ "crossbeam-channel",
+ "fnv",
+ "fortanix-sgx-abi",
+ "ipc-queue",
+ "lazy_static 1.4.0",
+ "spin",
+]
+
 [[package]]
 name = "atty"
 version = "0.2.14"
@@ -2331,6 +2343,12 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
+[[package]]
+name = "spin"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
+
 [[package]]
 name = "static_assertions"
 version = "1.1.0"
diff --git a/Cargo.toml b/Cargo.toml
index 96cf25bc..df294fd1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,7 @@
 [workspace]
 members = [
     "aesm-client",
+    "async-usercalls",
     "dcap-provider",
     "dcap-ql-sys",
     "dcap-ql",
diff --git a/async-usercalls/Cargo.toml b/async-usercalls/Cargo.toml
new file mode 100644
index 00000000..64c1e0dc
--- /dev/null
+++ b/async-usercalls/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "async-usercalls"
+version = "0.1.0"
+authors = ["Fortanix, Inc."]
+license = "MPL-2.0"
+edition = "2018"
+description = """
+An interface for asynchronous usercalls in SGX enclaves.
+
+This is an SGX-only crate; you should compile it with the `x86_64-fortanix-unknown-sgx` target.
+"""
+repository = "https://github.com/fortanix/rust-sgx"
+documentation = "https://edp.fortanix.com/docs/api/async_usercalls/"
+homepage = "https://edp.fortanix.com/"
+keywords = ["sgx", "async", "usercall"]
+categories = ["asynchronous"]
+
+[dependencies]
+# Project dependencies
+ipc-queue = { version = "0.1", path = "../ipc-queue" }
+fortanix-sgx-abi = { version = "0.4", path = "../fortanix-sgx-abi" }
+
+# External dependencies
+lazy_static = "1.4.0"     # MIT/Apache-2.0
+crossbeam-channel = "0.4" # MIT/Apache-2.0
+spin = "0.5"              # MIT/Apache-2.0
+fnv = "1.0"               # MIT/Apache-2.0
+
+# For cargo test --target x86_64-fortanix-unknown-sgx
+[package.metadata.fortanix-sgx]
+threads = 128
diff --git a/async-usercalls/rustfmt.toml b/async-usercalls/rustfmt.toml
new file mode 100644
index 00000000..75306517
--- /dev/null
+++ b/async-usercalls/rustfmt.toml
@@ -0,0 +1 @@
+max_width = 120
diff --git a/async-usercalls/src/alloc/allocator.rs b/async-usercalls/src/alloc/allocator.rs
new file mode 100644
index 00000000..7c6cef9f
--- /dev/null
+++ b/async-usercalls/src/alloc/allocator.rs
@@ -0,0 +1,145 @@
+use super::slab::{BufSlab, Slab, SlabAllocator, User, MAX_COUNT};
+use std::cmp;
+use std::os::fortanix_sgx::usercalls::raw::ByteBuffer;
+
+pub const MIN_BUF_SIZE: usize = 1 << 5; // 32 bytes
+pub const MAX_BUF_SIZE: usize = 1 << 16; // 64 KB
+pub const NUM_SIZES: usize = 1 + (MAX_BUF_SIZE / MIN_BUF_SIZE).trailing_zeros() as usize;
+
+pub struct SharedAllocator {
+    by_size: Vec<Vec<BufSlab>>,
+    byte_buffers: Vec<Slab<ByteBuffer>>,
+}
+
+unsafe impl Send for SharedAllocator {}
+unsafe impl Sync for SharedAllocator {}
+
+impl SharedAllocator {
+    pub fn new(buf_counts: [usize; NUM_SIZES], byte_buffer_count: usize) -> Self {
+        let mut by_size = Vec::with_capacity(NUM_SIZES);
+        for i in 0..NUM_SIZES {
+            by_size.push(make_buf_slabs(buf_counts[i], MIN_BUF_SIZE << i));
+        }
+        let byte_buffers = make_byte_buffers(byte_buffer_count);
+        Self { by_size, byte_buffers }
+    }
+
+    pub fn alloc_buf(&self, size: usize) -> Option<User<[u8]>> {
+        assert!(size > 0);
+        if size > MAX_BUF_SIZE {
+            return None;
+        }
+        let (_, index) = size_index(size);
+        self.by_size[index].alloc()
+    }
+
+    pub fn alloc_byte_buffer(&self) -> Option<User<ByteBuffer>> {
+        self.byte_buffers.alloc()
+    }
+}
+pub struct LocalAllocator {
+    initial_buf_counts: [usize; NUM_SIZES],
+    initial_byte_buffer_count: usize,
+    inner: SharedAllocator,
+}
+
+impl LocalAllocator {
+    pub fn new(initial_buf_counts: [usize; NUM_SIZES], initial_byte_buffer_count: usize) -> Self {
+        let mut by_size = Vec::with_capacity(NUM_SIZES);
+        by_size.resize_with(NUM_SIZES, Default::default);
+        let byte_buffers = Vec::new();
+        Self {
+            initial_buf_counts,
+            initial_byte_buffer_count,
+            inner: SharedAllocator { by_size, byte_buffers },
+        }
+    }
+
+    pub fn alloc_buf(&mut self, request_size: usize) -> User<[u8]> {
+        assert!(request_size > 0);
+        if request_size > MAX_BUF_SIZE {
+            // Always allocate very large buffers directly
+            return User::<[u8]>::uninitialized(request_size);
+        }
+        let (size, index) = size_index(request_size);
+        if let Some(buf) = self.inner.by_size[index].alloc() {
+            return buf;
+        }
+        let slabs = &mut self.inner.by_size[index];
+        if slabs.len() >= 8 {
+            // Keep the number of slabs for each size small.
+            return User::<[u8]>::uninitialized(request_size);
+        }
+        let count = slabs.last().map_or(self.initial_buf_counts[index], |s| s.count() * 2);
+        // Limit each slab's count for better worst-case performance.
+        let count = cmp::min(count, MAX_COUNT / 8);
+        slabs.push(BufSlab::new(count, size));
+        slabs.last().unwrap().alloc().expect("fresh slab failed to allocate")
+    }
+
+    pub fn alloc_byte_buffer(&mut self) -> User<ByteBuffer> {
+        let bbs = &mut self.inner.byte_buffers;
+        if let Some(byte_buffer) = bbs.alloc() {
+            return byte_buffer;
+        }
+        if bbs.len() >= 8 {
+            // Keep the number of slabs small.
+            return User::<ByteBuffer>::uninitialized();
+        }
+        let count = bbs.last().map_or(self.initial_byte_buffer_count, |s| s.count() * 2);
+        // Limit each slab's count for better worst-case performance.
+        let count = cmp::min(count, MAX_COUNT / 8);
+        bbs.push(Slab::new(count));
+        bbs.last().unwrap().alloc().expect("fresh slab failed to allocate")
+    }
+}
+
+fn make_buf_slabs(count: usize, size: usize) -> Vec<BufSlab> {
+    match count {
+        0 => Vec::new(),
+        n if n < 1024 => vec![BufSlab::new(n, size)],
+        n if n < 4 * 1024 => vec![BufSlab::new(n / 2, size), BufSlab::new(n / 2, size)],
+        n if n < 32 * 1024 => vec![
+            BufSlab::new(n / 4, size),
+            BufSlab::new(n / 4, size),
+            BufSlab::new(n / 4, size),
+            BufSlab::new(n / 4, size),
+        ],
+        n => vec![
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+            BufSlab::new(n / 8, size),
+        ],
+    }
+}
+
+fn make_byte_buffers(count: usize) -> Vec<Slab<ByteBuffer>> {
+    match count {
+        0 => Vec::new(),
+        n if n < 1024 => vec![Slab::new(n)],
+        n if n < 4 * 1024 => vec![Slab::new(n / 2), Slab::new(n / 2)],
+        n if n < 32 * 1024 => vec![Slab::new(n / 4), Slab::new(n / 4), Slab::new(n / 4), Slab::new(n / 4)],
+        n => vec![
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+            Slab::new(n / 8),
+        ],
+    }
+}
+
+fn size_index(request_size: usize) -> (usize, usize) {
+    let size = cmp::max(MIN_BUF_SIZE, request_size.next_power_of_two());
+    let index = (size / MIN_BUF_SIZE).trailing_zeros() as usize;
+    (size, index)
+}
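+
+// Editorial note (illustrative, not part of the original patch): `size_index`
+// rounds a request up to the next power of two, clamped below at MIN_BUF_SIZE,
+// and derives the size-class index from the exponent. For example:
+//     size_index(1)     == (32, 0)
+//     size_index(33)    == (64, 1)
+//     size_index(4096)  == (4096, 7)
+//     size_index(65536) == (65536, 11)  // the largest class, NUM_SIZES - 1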
diff --git a/async-usercalls/src/alloc/bitmap.rs b/async-usercalls/src/alloc/bitmap.rs
new file mode 100644
index 00000000..80da1cca
--- /dev/null
+++ b/async-usercalls/src/alloc/bitmap.rs
@@ -0,0 +1,156 @@
+use spin::Mutex;
+use std::sync::atomic::*;
+
+pub struct OptionalBitmap(BitmapKind);
+
+struct LargeBitmap(Mutex<LargeBitmapInner>);
+
+struct LargeBitmapInner {
+    bits: Box<[u64]>,
+    unset_count: usize, // optimization
+}
+
+enum BitmapKind {
+    None,
+    V1(AtomicU8),
+    V2(AtomicU16),
+    V3(AtomicU32),
+    V4(AtomicU64),
+    V5(LargeBitmap),
+}
+
+impl OptionalBitmap {
+    pub fn none() -> Self {
+        Self(BitmapKind::None)
+    }
+
+    /// `bit_count` must be >= 8 and a power of two
+    pub fn new(bit_count: usize) -> Self {
+        Self(match bit_count {
+            8 => BitmapKind::V1(AtomicU8::new(0)),
+            16 => BitmapKind::V2(AtomicU16::new(0)),
+            32 => BitmapKind::V3(AtomicU32::new(0)),
+            64 => BitmapKind::V4(AtomicU64::new(0)),
+            n if n > 0 && n % 64 == 0 => {
+                let bits = vec![0u64; n / 64].into_boxed_slice();
+                BitmapKind::V5(LargeBitmap(Mutex::new(LargeBitmapInner {
+                    bits,
+                    unset_count: bit_count,
+                })))
+            }
+            _ => panic!("bit_count must be >= 8 and a power of two"),
+        })
+    }
+
+    /// set the bit at given index to 0 and panic if the old value was not 1.
+    pub fn unset(&self, index: usize) {
+        match self.0 {
+            BitmapKind::None => {}
+            BitmapKind::V1(ref a) => a.unset(index),
+            BitmapKind::V2(ref b) => b.unset(index),
+            BitmapKind::V3(ref c) => c.unset(index),
+            BitmapKind::V4(ref d) => d.unset(index),
+            BitmapKind::V5(ref e) => e.unset(index),
+        }
+    }
+
+    /// return the index of a previously unset bit and set that bit to 1.
+    pub fn reserve(&self) -> Option<usize> {
+        match self.0 {
+            BitmapKind::None => None,
+            BitmapKind::V1(ref a) => a.reserve(),
+            BitmapKind::V2(ref b) => b.reserve(),
+            BitmapKind::V3(ref c) => c.reserve(),
+            BitmapKind::V4(ref d) => d.reserve(),
+            BitmapKind::V5(ref e) => e.reserve(),
+        }
+    }
+}
+
+trait BitmapOps {
+    fn unset(&self, index: usize);
+    fn reserve(&self) -> Option<usize>;
+}
+
+macro_rules! impl_bitmap_ops {
+    ( $( $t:ty ),* $(,)? ) => {$(
+        impl BitmapOps for $t {
+            fn unset(&self, index: usize) {
+                let bit = 1 << index;
+                let old = self.fetch_and(!bit, Ordering::Release) & bit;
+                assert!(old != 0);
+            }
+
+            fn reserve(&self) -> Option<usize> {
+                let initial = self.load(Ordering::Relaxed);
+                let unset_count = initial.count_zeros();
+                let (mut index, mut bit) = match unset_count {
+                    0 => return None,
+                    _ => (0, 1),
+                };
+                for _ in 0..unset_count {
+                    // find the next unset bit
+                    while bit & initial != 0 {
+                        index += 1;
+                        bit = bit << 1;
+                    }
+                    let old = self.fetch_or(bit, Ordering::Acquire) & bit;
+                    if old == 0 {
+                        return Some(index);
+                    }
+                    index += 1;
+                    bit = bit << 1;
+                }
+                None
+            }
+        }
+    )*};
+}
+
+impl_bitmap_ops!(AtomicU8, AtomicU16, AtomicU32, AtomicU64);
+
+impl BitmapOps for LargeBitmap {
+    fn unset(&self, index: usize) {
+        let mut inner = self.0.lock();
+        let array = &mut inner.bits;
+        assert!(index < array.len() * 64);
+        let slot = index / 64;
+        let offset = index % 64;
+        let element = &mut array[slot];
+
+        let bit = 1 << offset;
+        let old = *element & bit;
+        *element = *element & !bit;
+        inner.unset_count += 1;
+        assert!(old != 0);
+    }
+
+    fn reserve(&self) -> Option<usize> {
+        let mut inner = self.0.lock();
+        if inner.unset_count == 0 {
+            return None;
+        }
+        let array = &mut inner.bits;
+        for slot in 0..array.len() {
+            if let (Some(offset), val) = reserve_u64(array[slot]) {
+                array[slot] = val;
+                inner.unset_count -= 1;
+                return Some(slot * 64 + offset);
+            }
+        }
+        unreachable!()
+    }
+}
+
+fn reserve_u64(element: u64) -> (Option<usize>, u64) {
+    let (mut index, mut bit) = match element.count_zeros() {
+        0 => return (None, element),
+        _ => (0, 1),
+    };
+    // find the first unset bit
+    while bit & element != 0 {
+        index += 1;
+        bit = bit << 1;
+    }
+    (Some(index), element | bit)
+}
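+
+// Editorial note (illustrative, not part of the original patch): for an
+// AtomicU8 holding 0b0000_0101, `reserve()` skips bits 0 and 2 and attempts
+// `fetch_or` on bit 1, returning Some(1) on success. If a racing thread sets
+// that bit first, the scan advances to the next unset bit; None is returned
+// only after every initially-unset bit has been tried.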
diff --git a/async-usercalls/src/alloc/io_bufs.rs b/async-usercalls/src/alloc/io_bufs.rs
new file mode 100644
index 00000000..3880e763
--- /dev/null
+++ b/async-usercalls/src/alloc/io_bufs.rs
@@ -0,0 +1,260 @@
+use super::slab::User;
+use std::cell::UnsafeCell;
+use std::cmp;
+use std::io::IoSlice;
+use std::ops::{Deref, DerefMut, Range};
+use std::os::fortanix_sgx::usercalls::alloc::UserRef;
+use std::sync::Arc;
+
+pub struct UserBuf(UserBufKind);
+
+enum UserBufKind {
+    Owned {
+        user: User<[u8]>,
+        range: Range<usize>,
+    },
+    Shared {
+        user: Arc<UnsafeCell<User<[u8]>>>,
+        range: Range<usize>,
+    },
+}
+
+impl UserBuf {
+    pub fn into_user(self) -> Result<User<[u8]>, Self> {
+        match self.0 {
+            UserBufKind::Owned { user, .. } => Ok(user),
+            UserBufKind::Shared { user, range } => Err(Self(UserBufKind::Shared { user, range })),
+        }
+    }
+
+    fn into_shared(self) -> Option<Arc<UnsafeCell<User<[u8]>>>> {
+        match self.0 {
+            UserBufKind::Owned { .. } => None,
+            UserBufKind::Shared { user, .. } => Some(user),
+        }
+    }
+}
+
+unsafe impl Send for UserBuf {}
+
+impl Deref for UserBuf {
+    type Target = UserRef<[u8]>;
+
+    fn deref(&self) -> &Self::Target {
+        match self.0 {
+            UserBufKind::Owned { ref user, ref range } => &user[range.start..range.end],
+            UserBufKind::Shared { ref user, ref range } => {
+                let user = unsafe { &*user.get() };
+                &user[range.start..range.end]
+            }
+        }
+    }
+}
+
+impl DerefMut for UserBuf {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        match self.0 {
+            UserBufKind::Owned {
+                ref mut user,
+                ref range,
+            } => &mut user[range.start..range.end],
+            UserBufKind::Shared { ref user, ref range } => {
+                let user = unsafe { &mut *user.get() };
+                &mut user[range.start..range.end]
+            }
+        }
+    }
+}
+
+impl From<User<[u8]>> for UserBuf {
+    fn from(user: User<[u8]>) -> Self {
+        UserBuf(UserBufKind::Owned {
+            range: 0..user.len(),
+            user,
+        })
+    }
+}
+
+impl From<(User<[u8]>, Range<usize>)> for UserBuf {
+    fn from(pair: (User<[u8]>, Range<usize>)) -> Self {
+        UserBuf(UserBufKind::Owned {
+            user: pair.0,
+            range: pair.1,
+        })
+    }
+}
+
+/// `WriteBuffer` provides a ring buffer that can be written to by the code
+/// running in the enclave while a portion of it can be passed to a `write`
+/// usercall running concurrently. It ensures that enclave code does not write
+/// to the portion sent to userspace.
+pub struct WriteBuffer {
+    userbuf: Arc<UnsafeCell<User<[u8]>>>,
+    buf_len: usize,
+    read: u32,
+    write: u32,
+}
+
+unsafe impl Send for WriteBuffer {}
+
+impl WriteBuffer {
+    pub fn new(userbuf: User<[u8]>) -> Self {
+        Self {
+            buf_len: userbuf.len(),
+            userbuf: Arc::new(UnsafeCell::new(userbuf)),
+            read: 0,
+            write: 0,
+        }
+    }
+
+    pub fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> usize {
+        if self.is_full() {
+            return 0;
+        }
+        let mut wrote = 0;
+        for buf in bufs {
+            wrote += self.write(buf);
+        }
+        wrote
+    }
+
+    pub fn write(&mut self, buf: &[u8]) -> usize {
+        let (_, write_offset) = self.offsets();
+        let rem = self.remaining_capacity();
+        let can_write = cmp::min(buf.len(), rem);
+        let end = cmp::min(self.buf_len, write_offset + can_write);
+        let n = end - write_offset;
+        unsafe {
+            let userbuf = &mut *self.userbuf.get();
+            userbuf[write_offset..write_offset + n].copy_from_enclave(&buf[..n]);
+        }
+        self.advance_write(n);
+        n + if n < can_write { self.write(&buf[n..]) } else { 0 }
+    }
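+
+    // Editorial note (illustrative, not part of the original patch): `read`
+    // and `write` advance modulo `2 * buf_len` rather than `buf_len` so that
+    // a full buffer can be distinguished from an empty one. With buf_len = 4,
+    // after writing 4 bytes: read == 0 and write == 4; both map to offset 0,
+    // but read != write, so the buffer is full. Only read == write means empty.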
+    /// This function returns a slice of bytes appropriate for writing to a socket.
+    /// Once some or all of these bytes are successfully written to the socket,
+    /// `self.consume()` must be called to actually consume those bytes.
+    ///
+    /// Returns None if the buffer is empty.
+    ///
+    /// Panics if called more than once in a row without either calling `consume()`
+    /// or dropping the previously returned buffer.
+    pub fn consumable_chunk(&mut self) -> Option<UserBuf> {
+        assert!(
+            Arc::strong_count(&self.userbuf) == 1,
+            "called consumable_chunk() more than once in a row"
+        );
+        let range = match self.offsets() {
+            (_, _) if self.read == self.write => return None, // empty
+            (r, w) if r < w => r..w,
+            (r, _) => r..self.buf_len,
+        };
+        Some(UserBuf(UserBufKind::Shared {
+            user: self.userbuf.clone(),
+            range,
+        }))
+    }
+
+    /// Mark `n` bytes as consumed. `buf` must have been produced by a call
+    /// to `self.consumable_chunk()`.
+    /// Panics if:
+    /// - `n > buf.len()`
+    /// - `buf` was not produced by `self.consumable_chunk()`
+    ///
+    /// This function is supposed to be used in conjunction with `consumable_chunk()`.
+    pub fn consume(&mut self, buf: UserBuf, n: usize) {
+        assert!(n <= buf.len());
+        const PANIC_MESSAGE: &'static str = "`buf` not produced by self.consumable_chunk()";
+        let buf = buf.into_shared().expect(PANIC_MESSAGE);
+        assert!(Arc::ptr_eq(&self.userbuf, &buf), PANIC_MESSAGE);
+        drop(buf);
+        assert!(Arc::strong_count(&self.userbuf) == 1, PANIC_MESSAGE);
+        self.advance_read(n);
+    }
+
+    fn len(&self) -> usize {
+        match self.offsets() {
+            (_, _) if self.read == self.write => 0, // empty
+            (r, w) if r == w && self.read != self.write => self.buf_len, // full
+            (r, w) if r < w => w - r,
+            (r, w) => w + self.buf_len - r,
+        }
+    }
+
+    fn remaining_capacity(&self) -> usize {
+        let len = self.len();
+        debug_assert!(len <= self.buf_len);
+        self.buf_len - len
+    }
+
+    fn offsets(&self) -> (usize, usize) {
+        (self.read as usize % self.buf_len, self.write as usize % self.buf_len)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.read == self.write
+    }
+
+    fn is_full(&self) -> bool {
+        let (read_offset, write_offset) = self.offsets();
+        read_offset == write_offset && self.read != self.write
+    }
+
+    fn advance_read(&mut self, by: usize) {
+        debug_assert!(by <= self.len());
+        self.read = ((self.read as usize + by) % (self.buf_len * 2)) as _;
+    }
+
+    fn advance_write(&mut self, by: usize) {
+        debug_assert!(by <= self.remaining_capacity());
+        self.write = ((self.write as usize + by) % (self.buf_len * 2)) as _;
+    }
+}
+
+pub struct ReadBuffer {
+    userbuf: User<[u8]>,
+    position: usize,
+    len: usize,
+}
+
+impl ReadBuffer {
+    /// Constructs a new `ReadBuffer`, assuming `len` bytes of `userbuf` have
+    /// meaningful data. Panics if `len > userbuf.len()`.
+    pub fn new(userbuf: User<[u8]>, len: usize) -> ReadBuffer {
+        assert!(len <= userbuf.len());
+        ReadBuffer {
+            userbuf,
+            position: 0,
+            len,
+        }
+    }
+
+    pub fn read(&mut self, buf: &mut [u8]) -> usize {
+        debug_assert!(self.position <= self.len);
+        if self.position == self.len {
+            return 0;
+        }
+        let n = cmp::min(buf.len(), self.len - self.position);
+        self.userbuf[self.position..self.position + n].copy_to_enclave(&mut buf[..n]);
+        self.position += n;
+        n
+    }
+
+    /// Returns the number of bytes that have not been read yet.
+    pub fn remaining_bytes(&self) -> usize {
+        debug_assert!(self.position <= self.len);
+        self.len - self.position
+    }
+
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    /// Consumes self and returns the internal userspace buffer.
+    /// It's the caller's responsibility to ensure all bytes have been read
+    /// before calling this function.
+    pub fn into_inner(self) -> User<[u8]> {
+        self.userbuf
+    }
+}
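+
+// Editorial sketch of the intended flow (hypothetical names, error handling
+// omitted; not part of the original patch):
+//
+//     let mut wb = WriteBuffer::new(super::alloc_buf(8192));
+//     let n = wb.write(payload);                 // enclave side fills the ring
+//     if let Some(chunk) = wb.consumable_chunk() {
+//         // pass `chunk` to an async `write` usercall; once userspace
+//         // reports `written` bytes, hand the chunk back:
+//         wb.consume(chunk, written);
+//     }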
diff --git a/async-usercalls/src/alloc/mod.rs b/async-usercalls/src/alloc/mod.rs
new file mode 100644
index 00000000..ab1085c0
--- /dev/null
+++ b/async-usercalls/src/alloc/mod.rs
@@ -0,0 +1,69 @@
+use std::cell::RefCell;
+use std::os::fortanix_sgx::usercalls::raw::ByteBuffer;
+
+mod allocator;
+mod bitmap;
+mod io_bufs;
+mod slab;
+#[cfg(test)]
+mod tests;
+
+use self::allocator::{LocalAllocator, SharedAllocator};
+pub use self::io_bufs::{ReadBuffer, UserBuf, WriteBuffer};
+pub use self::slab::{User, UserSafeExt};
+
+/// Allocates a slice of bytes in userspace that is at least as large as `size`.
+pub fn alloc_buf(size: usize) -> User<[u8]> {
+    if let Some(buf) = SHARED.alloc_buf(size) {
+        return buf;
+    }
+    LOCAL.with(|local| local.borrow_mut().alloc_buf(size))
+}
+
+/// Allocates a `ByteBuffer` in userspace.
+pub fn alloc_byte_buffer() -> User<ByteBuffer> {
+    if let Some(bb) = SHARED.alloc_byte_buffer() {
+        return bb;
+    }
+    LOCAL.with(|local| local.borrow_mut().alloc_byte_buffer())
+}
+
+lazy_static::lazy_static! {
+    static ref SHARED: SharedAllocator = SharedAllocator::new(
+        [
+            8192, // x 32 bytes
+            4096, // x 64 bytes
+            2048, // x 128 bytes
+            1024, // x 256 bytes
+            512,  // x 512 bytes
+            256,  // x 1 KB
+            64,   // x 2 KB
+            32,   // x 4 KB
+            16,   // x 8 KB
+            1024, // x 16 KB
+            32,   // x 32 KB
+            16,   // x 64 KB
+        ],
+        8192, // x ByteBuffer(s)
+    );
+}
+
+std::thread_local! {
+    static LOCAL: RefCell<LocalAllocator> = RefCell::new(LocalAllocator::new(
+        [
+            128, // x 32 bytes
+            64,  // x 64 bytes
+            32,  // x 128 bytes
+            16,  // x 256 bytes
+            8,   // x 512 bytes
+            8,   // x 1 KB
+            8,   // x 2 KB
+            8,   // x 4 KB
+            8,   // x 8 KB
+            8,   // x 16 KB
+            8,   // x 32 KB
+            8,   // x 64 KB
+        ],
+        64, // x ByteBuffer(s)
+    ));
+}
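+
+// Editorial sketch (not part of the original patch): callers never pick a
+// slab directly; a request is rounded up to the next size class and served
+// from the shared allocator first, falling back to the thread-local one:
+//
+//     let b = alloc_buf(900);
+//     assert_eq!(b.len(), 1024); // rounded up to the 1 KB class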
diff --git a/async-usercalls/src/alloc/slab.rs b/async-usercalls/src/alloc/slab.rs
new file mode 100644
index 00000000..a9e0a0c4
--- /dev/null
+++ b/async-usercalls/src/alloc/slab.rs
@@ -0,0 +1,198 @@
+use super::bitmap::OptionalBitmap;
+use std::cell::UnsafeCell;
+use std::mem;
+use std::ops::{Deref, DerefMut};
+use std::os::fortanix_sgx::usercalls::alloc::{User as StdUser, UserRef, UserSafe, UserSafeSized};
+use std::sync::Arc;
+
+pub const MIN_COUNT: usize = 8;
+pub const MAX_COUNT: usize = 64 * 1024;
+pub const MIN_UNIT_LEN: usize = 32;
+
+pub trait SlabAllocator {
+    type Output;
+
+    fn alloc(&self) -> Option<Self::Output>;
+    fn count(&self) -> usize;
+    fn total_size(&self) -> usize;
+}
+
+impl<A: SlabAllocator> SlabAllocator for Vec<A> {
+    type Output = A::Output;
+
+    fn alloc(&self) -> Option<Self::Output> {
+        for a in self.iter() {
+            if let Some(buf) = a.alloc() {
+                return Some(buf);
+            }
+        }
+        None
+    }
+
+    fn count(&self) -> usize {
+        self.iter().map(|a| a.count()).sum()
+    }
+
+    fn total_size(&self) -> usize {
+        self.iter().map(|a| a.total_size()).sum()
+    }
+}
+
+struct Storage<T: UserSafe + ?Sized> {
+    user: UnsafeCell<StdUser<T>>,
+    bitmap: OptionalBitmap,
+}
+
+pub struct BufSlab {
+    storage: Arc<Storage<[u8]>>,
+    unit_len: usize,
+}
+
+impl BufSlab {
+    pub fn new(count: usize, unit_len: usize) -> Self {
+        assert!(count.is_power_of_two() && count >= MIN_COUNT && count <= MAX_COUNT);
+        assert!(unit_len.is_power_of_two() && unit_len >= MIN_UNIT_LEN);
+        BufSlab {
+            storage: Arc::new(Storage {
+                user: UnsafeCell::new(StdUser::<[u8]>::uninitialized(count * unit_len)),
+                bitmap: OptionalBitmap::new(count),
+            }),
+            unit_len,
+        }
+    }
+}
+
+impl SlabAllocator for BufSlab {
+    type Output = User<[u8]>;
+
+    fn alloc(&self) -> Option<Self::Output> {
+        let index = self.storage.bitmap.reserve()?;
+        let start = index * self.unit_len;
+        let end = start + self.unit_len;
+        let user = unsafe { &mut *self.storage.user.get() };
+        let user_ref = &mut user[start..end];
+        Some(User {
+            user_ref,
+            storage: self.storage.clone(),
+            index,
+        })
+    }
+
+    fn count(&self) -> usize {
+        self.total_size() / self.unit_len
+    }
+
+    fn total_size(&self) -> usize {
+        let user = unsafe { &*self.storage.user.get() };
+        user.len()
+    }
+}
+
+pub trait UserSafeExt: UserSafe {
+    type Element: UserSafeSized;
+}
+
+impl<T: UserSafeSized> UserSafeExt for [T] {
+    type Element = T;
+}
+
+impl<T: UserSafeSized> UserSafeExt for T {
+    type Element = T;
+}
+
+pub struct User<T: UserSafeExt + ?Sized + 'static> {
+    user_ref: &'static mut UserRef<T>,
+    storage: Arc<Storage<[T::Element]>>,
+    index: usize,
+}
+
+unsafe impl<T: UserSafeExt + ?Sized + 'static> Send for User<T> {}
+
+impl<T: UserSafeSized> User<T> {
+    pub fn uninitialized() -> Self {
+        let storage = Arc::new(Storage {
+            user: UnsafeCell::new(StdUser::<[T]>::uninitialized(1)),
+            bitmap: OptionalBitmap::none(),
+        });
+        let user = unsafe { &mut *storage.user.get() };
+        let user_ref = &mut user[0];
+        Self {
+            user_ref,
+            storage,
+            index: 0,
+        }
+    }
+}
+
+impl<T: UserSafeSized> User<[T]> {
+    pub fn uninitialized(n: usize) -> Self {
+        let storage = Arc::new(Storage {
+            user: UnsafeCell::new(StdUser::<[T]>::uninitialized(n)),
+            bitmap: OptionalBitmap::none(),
+        });
+        let user = unsafe { &mut *storage.user.get() };
+        let user_ref = &mut user[..];
+        Self {
+            user_ref,
+            storage,
+            index: 0,
+        }
+    }
+}
+
+impl<T: UserSafeExt + ?Sized + 'static> Drop for User<T> {
+    fn drop(&mut self) {
+        self.storage.bitmap.unset(self.index);
+    }
+}
+
+impl<T: UserSafeExt + ?Sized + 'static> Deref for User<T> {
+    type Target = UserRef<T>;
+
+    fn deref(&self) -> &Self::Target {
+        self.user_ref
+    }
+}
+
+impl<T: UserSafeExt + ?Sized + 'static> DerefMut for User<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.user_ref
+    }
+}
+
+pub struct Slab<T: UserSafeSized>(Arc<Storage<[T]>>);
+
+impl<T: UserSafeSized> Slab<T> {
+    pub fn new(count: usize) -> Self {
+        assert!(count.is_power_of_two() && count >= MIN_COUNT && count <= MAX_COUNT);
+        Slab(Arc::new(Storage {
+            user: UnsafeCell::new(StdUser::<[T]>::uninitialized(count)),
+            bitmap: OptionalBitmap::new(count),
+        }))
+    }
+}
+
+impl<T: UserSafeSized> SlabAllocator for Slab<T> {
+    type Output = User<T>;
+
+    fn alloc(&self) -> Option<Self::Output> {
+        let index = self.0.bitmap.reserve()?;
+        let user = unsafe { &mut *self.0.user.get() };
+        let user_ref = &mut user[index];
+        Some(User {
+            user_ref,
+            storage: self.0.clone(),
+            index,
+        })
+    }
+
+    fn count(&self) -> usize {
+        let user = unsafe { &*self.0.user.get() };
+        user.len()
+    }
+
+    fn total_size(&self) -> usize {
+        let user = unsafe { &*self.0.user.get() };
+        user.len() * mem::size_of::<T>()
+    }
+}
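+
+// Editorial note (not part of the original patch): a `BufSlab` makes one
+// userspace allocation of `count * unit_len` bytes plus a `count`-bit bitmap.
+// `alloc()` reserves a bit and hands out a `User<[u8]>` view of the matching
+// `unit_len` window; dropping the `User` clears the bit, returning the window
+// to the slab without a usercall.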
diff --git a/async-usercalls/src/alloc/tests.rs b/async-usercalls/src/alloc/tests.rs
new file mode 100644
index 00000000..da4e8b3d
--- /dev/null
+++ b/async-usercalls/src/alloc/tests.rs
@@ -0,0 +1,323 @@
+use super::allocator::SharedAllocator;
+use super::bitmap::*;
+use super::io_bufs::{ReadBuffer, UserBuf, WriteBuffer};
+use super::slab::{BufSlab, Slab, SlabAllocator, User};
+use crossbeam_channel as mpmc;
+use std::collections::HashSet;
+use std::os::fortanix_sgx::usercalls::raw::ByteBuffer;
+use std::sync::atomic::*;
+use std::sync::Arc;
+use std::thread;
+use std::time::Instant;
+
+// Copied from Rust tests (test/ui/mpsc_stress.rs)
+struct Barrier {
+    // Not using mutex/condvar for precision
+    shared: Arc<AtomicUsize>,
+    count: usize,
+}
+
+impl Barrier {
+    fn new(count: usize) -> Vec<Barrier> {
+        let shared = Arc::new(AtomicUsize::new(0));
+        (0..count)
+            .map(|_| Barrier {
+                shared: shared.clone(),
+                count: count,
+            })
+            .collect()
+    }
+
+    /// Returns when `count` threads enter `wait`
+    fn wait(self) {
+        self.shared.fetch_add(1, Ordering::SeqCst);
+        while self.shared.load(Ordering::SeqCst) != self.count {}
+    }
+}
+
+#[test]
+fn bitmap() {
+    const BITS: usize = 1024;
+    let bitmap = OptionalBitmap::new(BITS);
+    for _ in 0..BITS {
+        assert!(bitmap.reserve().is_some());
+    }
+    let mut indices = vec![34, 7, 5, 6, 120, 121, 122, 127, 0, 9]
+        .into_iter()
+        .collect::<HashSet<_>>();
+    for &i in indices.iter() {
+        bitmap.unset(i);
+    }
+    while let Some(index) = bitmap.reserve() {
+        assert!(indices.remove(&index));
+    }
+    assert!(indices.is_empty());
+}
+
+#[test]
+fn bitmap_concurrent_use() {
+    const BITS: usize = 16;
+    const THREADS: usize = 4;
+    let bitmap = Arc::new(OptionalBitmap::new(BITS));
+    for _ in 0..BITS - THREADS {
+        bitmap.reserve().unwrap();
+    }
+    let mut handles = Vec::with_capacity(THREADS);
+    let mut barriers = Barrier::new(THREADS);
+    let (tx, rx) = mpmc::unbounded();
+
+    for _ in 0..THREADS {
+        let bitmap = Arc::clone(&bitmap);
+        let barrier = barriers.pop().unwrap();
+        let tx = tx.clone();
+
+        handles.push(thread::spawn(move || {
+            barrier.wait();
+            let index = bitmap.reserve().unwrap();
+            tx.send(index).unwrap();
+        }));
+    }
+    drop(tx);
+    for x in rx.iter() {
+        bitmap.unset(x);
+    }
+    for h in handles {
+        h.join().unwrap();
+    }
+}
+
+#[test]
+fn buf_slab() {
+    const COUNT: usize = 16;
+    const SIZE: usize = 64;
+    let buf_slab = BufSlab::new(COUNT, SIZE);
+
+    let bufs = (0..COUNT)
+        .map(|_| {
+            let buf = buf_slab.alloc().unwrap();
+            assert!(buf.len() == SIZE);
+            buf
+        })
+        .collect::<Vec<_>>();
+
+    assert!(buf_slab.alloc().is_none());
+    drop(bufs);
+    assert!(buf_slab.alloc().is_some());
+}
+
+#[test]
+fn byte_buffer_slab() {
+    const COUNT: usize = 256;
+    let slab = Slab::<ByteBuffer>::new(COUNT);
+
+    let bufs = (0..COUNT)
+        .map(|_| slab.alloc().unwrap())
+        .collect::<Vec<User<ByteBuffer>>>();
+
+    assert!(slab.alloc().is_none());
+    drop(bufs);
+    assert!(slab.alloc().is_some());
+}
+
+#[test]
+fn user_is_send() {
+    const COUNT: usize = 16;
+    const SIZE: usize = 1024;
+    let buf_slab = BufSlab::new(COUNT, SIZE);
+
+    let mut user = buf_slab.alloc().unwrap();
+
+    let h = thread::spawn(move || {
+        user[0..5].copy_from_enclave(b"hello");
+    });
+
+    h.join().unwrap();
+}
+
+fn slab_speed(count: usize) {
+    let t0 = Instant::now();
+    const SIZE: usize = 32;
+    const N: u32 = 100_000;
+    let buf_slab = BufSlab::new(count, SIZE);
+
+    let bufs = (0..count - 1).map(|_| buf_slab.alloc().unwrap()).collect::<Vec<_>>();
+
+    let mut x = 0;
+    for _ in 0..N {
+        let b = buf_slab.alloc().unwrap();
+        x += b.len();
+    }
+    drop(bufs);
+    drop(buf_slab);
+    let d = t0.elapsed();
+    assert!(x > 0); // prevent the compiler from removing the whole loop above in release mode
+    println!("count = {} took {:?}", count, d / N);
+}
+
+#[test]
+#[ignore]
+fn speed_slab() {
+    println!("\n");
+    for i in 3..=16 {
+        slab_speed(1 << i);
+    }
+}
+
+#[test]
+#[ignore]
+fn speed_direct() {
+    use std::os::fortanix_sgx::usercalls::alloc::User;
+
+    let t0 = Instant::now();
+    const SIZE: usize = 32;
+    const N: u32 = 100_000;
+    let mut x = 0;
+    for _ in 0..N {
+        let b = User::<[u8]>::uninitialized(SIZE);
+        x += b.len();
+    }
+    let d = t0.elapsed();
+    assert!(x > 0);
+    println!("took {:?}", d / N);
+}
+
+#[test]
+fn shared_allocator() {
+    let a = SharedAllocator::new(
+        [
+            /*32:*/ 2048, /*64:*/ 1024, /*128:*/ 512, /*256:*/ 256, /*512:*/ 128,
+            /*1K:*/ 64, /*2K:*/ 0, /*4K:*/ 0, /*8K:*/ 0, /*16K:*/ 0, /*32K:*/ 0,
+            /*64K:*/ 1024,
+        ],
+        1024,
+    );
+    for size in 1..=32 {
+        let b = a.alloc_buf(size).unwrap();
+        assert!(b.len() == 32);
+    }
+    for size in 33..=64 {
+        let b = a.alloc_buf(size).unwrap();
+        assert!(b.len() == 64);
+    }
+    for &size in &[65, 79, 83, 120, 127, 128] {
+        let b = a.alloc_buf(size).unwrap();
+        assert!(b.len() == 128);
+    }
+    for &size in &[129, 199, 210, 250, 255, 256] {
+        let b = a.alloc_buf(size).unwrap();
+        assert!(b.len() == 256);
+    }
+    for &size in &[257, 299, 365, 500, 512] {
+        let b = a.alloc_buf(size).unwrap();
+        assert!(b.len() == 512);
+    }
+    for &size in &[513, 768, 1023, 1024] {
+        let b = a.alloc_buf(size).unwrap();
+        assert!(b.len() == 1024);
+    }
+    for i in 2..=32 {
+        assert!(a.alloc_buf(i * 1024).is_none());
+    }
+    for i in 33..=64 {
+        let b = a.alloc_buf(i * 1024).unwrap();
+        assert!(b.len() == 64 * 1024);
+    }
+}
+
+fn alloc_speed(count: usize) {
+    let t0 = Instant::now();
+    const SIZE: usize = 32;
+    const N: u32 = 100_000;
+
+    let bufs = (0..count - 1).map(|_| super::alloc_buf(SIZE)).collect::<Vec<_>>();
+
+    let mut x = 0;
+    for _ in 0..N {
+        let b = super::alloc_buf(SIZE);
+        x += b.len();
+    }
+    drop(bufs);
+    let d = t0.elapsed();
+    assert!(x > 0);
+    println!("count = {} took {:?}", count, d / N);
+}
+
+#[test]
+#[ignore]
+fn speed_overall() {
+    println!("\n");
+    for i in 3..=14 {
+        alloc_speed(1 << i);
+    }
+}
+
+#[test]
+fn alloc_buf_size() {
+    let b = super::alloc_buf(32);
+    assert_eq!(b.len(), 32);
+    let b = super::alloc_buf(128);
+    assert_eq!(b.len(), 128);
+    let b = super::alloc_buf(900);
+    assert_eq!(b.len(), 1024);
+    let b = super::alloc_buf(8 * 1024);
+    assert_eq!(b.len(), 8 * 1024);
+}
+
+#[test]
+fn write_buffer_basic() {
+    const LENGTH: usize = 1024;
+    let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024));
+
+    let buf = vec![0u8; LENGTH];
+    assert_eq!(write_buffer.write(&buf), LENGTH);
+    assert_eq!(write_buffer.write(&buf), 0);
+
+    let chunk = write_buffer.consumable_chunk().unwrap();
+    write_buffer.consume(chunk, 200);
+    assert_eq!(write_buffer.write(&buf), 200);
+    assert_eq!(write_buffer.write(&buf), 0);
+}
+
+#[test]
+#[should_panic]
+fn call_consumable_chunk_twice() {
+    const LENGTH: usize = 1024;
+    let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024));
+
+    let buf = vec![0u8; LENGTH];
+    assert_eq!(write_buffer.write(&buf), LENGTH);
+    assert_eq!(write_buffer.write(&buf), 0);
+
+    let chunk1 = write_buffer.consumable_chunk().unwrap();
+    let _ = write_buffer.consumable_chunk().unwrap();
+    drop(chunk1);
+}
+
+#[test]
+#[should_panic]
+fn consume_wrong_buf() {
+    const LENGTH: usize = 1024;
+    let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024));
+
+    let buf = vec![0u8; LENGTH];
+    assert_eq!(write_buffer.write(&buf), LENGTH);
+    assert_eq!(write_buffer.write(&buf), 0);
+
+    let unrelated_buf: UserBuf = super::alloc_buf(512).into();
+    write_buffer.consume(unrelated_buf, 100);
+}
+
+#[test]
+fn read_buffer_basic() {
+    let mut buf = super::alloc_buf(64);
+    const DATA: &'static [u8] = b"hello";
+    buf[0..DATA.len()].copy_from_enclave(DATA);
+
+    let mut read_buffer = ReadBuffer::new(buf, DATA.len());
+    assert_eq!(read_buffer.len(), DATA.len());
+    assert_eq!(read_buffer.remaining_bytes(), DATA.len());
+    let mut buf = [0u8; 8];
+    assert_eq!(read_buffer.read(&mut buf), DATA.len());
+    assert_eq!(read_buffer.remaining_bytes(), 0);
+    assert_eq!(&buf, b"hello\0\0\0");
+}
diff --git a/async-usercalls/src/batch_drop.rs b/async-usercalls/src/batch_drop.rs
new file mode 100644
index 00000000..f27b05c4
--- /dev/null
+++ b/async-usercalls/src/batch_drop.rs
@@ -0,0 +1,127 @@
+use crate::hacks::Usercall;
+use crate::provider_core::ProviderCore;
+use ipc_queue::Identified;
+use std::cell::RefCell;
+use std::mem;
+use std::os::fortanix_sgx::usercalls::alloc::{User, UserSafe};
+use std::os::fortanix_sgx::usercalls::raw::UsercallNrs;
+
+pub trait BatchDropable: private::BatchDropable {}
+impl<T: private::BatchDropable> BatchDropable for T {}
+
+/// Drop the given value at some point in the future (no rush!). This is useful
+/// for freeing userspace memory when we don't particularly care about when the
+/// buffer is freed. Multiple `free` usercalls are batched together and sent to
+/// userspace asynchronously. It is also guaranteed that the memory is freed if
+/// the current thread exits before there is a large enough batch.
+///
+/// This is mainly an optimization to avoid exiting the enclave for each
+/// usercall. Note that even when sending usercalls asynchronously, if the
+/// usercall queue is empty we still need to exit the enclave to signal the
+/// userspace that the queue is not empty anymore. The batch send would send
+/// multiple usercalls and notify the userspace at most once.
+pub fn batch_drop<T: BatchDropable>(t: T) {
+    t.batch_drop();
+}
+
+mod private {
+    use super::*;
+
+    const BATCH_SIZE: usize = 8;
+
+    struct BatchDropProvider {
+        core: ProviderCore,
+        deferred: Vec<Identified<Usercall>>,
+    }
+
+    impl BatchDropProvider {
+        pub fn new() -> Self {
+            Self {
+                core: ProviderCore::new(None),
+                deferred: Vec::with_capacity(BATCH_SIZE),
+            }
+        }
+
+        fn make_progress(&self, deferred: &[Identified<Usercall>]) -> usize {
+            let sent = self.core.try_send_multiple_usercalls(deferred);
+            if sent == 0 {
+                self.core.send_usercall(deferred[0]);
+                return 1;
+            }
+            sent
+        }
+
+        fn maybe_send_usercall(&mut self, u: Usercall) {
+            self.deferred.push(self.core.assign_id(u));
+            if self.deferred.len() < BATCH_SIZE {
+                return;
+            }
+            let sent = self.make_progress(&self.deferred);
+            let mut not_sent = self.deferred.split_off(sent);
+            self.deferred.clear();
+            self.deferred.append(&mut not_sent);
+        }
+
+        pub fn free<T: ?Sized + UserSafe>(&mut self, buf: User<T>) {
+            let ptr = buf.into_raw();
+            let size = unsafe { mem::size_of_val(&mut *ptr) };
+            let alignment = T::align_of();
+            let ptr = ptr as *mut u8;
+            let u = Usercall(UsercallNrs::free as _, ptr as _, size as _, alignment as _, 0);
+            self.maybe_send_usercall(u);
+        }
+    }
+
+    impl Drop for BatchDropProvider {
+        fn drop(&mut self) {
+            let mut sent = 0;
+            while sent < self.deferred.len() {
+                sent += self.make_progress(&self.deferred[sent..]);
+            }
+        }
+    }
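+
+    // Editorial sketch (not part of the original patch): typical use is
+    // fire-and-forget freeing of a userspace buffer:
+    //
+    //     batch_drop(User::<[u8]>::uninitialized(100)); // queued; flushed in batches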
+
+    std::thread_local! {
+        static PROVIDER: RefCell<BatchDropProvider> = RefCell::new(BatchDropProvider::new());
+    }
+
+    pub trait BatchDropable {
+        fn batch_drop(self);
+    }
+
+    impl<T: ?Sized + UserSafe> BatchDropable for User<T> {
+        fn batch_drop(self) {
+            PROVIDER.with(|p| p.borrow_mut().free(self));
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::batch_drop;
+    use std::os::fortanix_sgx::usercalls::alloc::User;
+    use std::thread;
+
+    #[test]
+    fn basic() {
+        for _ in 0..100 {
+            batch_drop(User::<[u8]>::uninitialized(100));
+        }
+    }
+
+    #[test]
+    fn multiple_threads() {
+        const THREADS: usize = 16;
+        let mut handles = Vec::with_capacity(THREADS);
+        for _ in 0..THREADS {
+            handles.push(thread::spawn(move || {
+                for _ in 0..1000 {
+                    batch_drop(User::<[u8]>::uninitialized(100));
+                }
+            }));
+        }
+        for h in handles {
+            h.join().unwrap();
+        }
+    }
+}
diff --git a/async-usercalls/src/callback.rs b/async-usercalls/src/callback.rs
new file mode 100644
index 00000000..369ca2b8
--- /dev/null
+++ b/async-usercalls/src/callback.rs
@@ -0,0 +1,89 @@
+use crate::duplicated::{FromSgxResult, ReturnValue};
+use crate::hacks::Return;
+use fortanix_sgx_abi::{Fd, Result as SgxResult};
+use std::io;
+
+pub struct CbFn<T>(Box<dyn FnOnce(T) + Send + 'static>);
+
+impl<T> CbFn<T> {
+    fn call(self, t: T) {
+        (self.0)(t);
+    }
+}
+
+impl<T, F> From<F> for CbFn<T>
+where
+    F: FnOnce(T) + Send + 'static,
+{
+    fn from(f: F) -> Self {
+        Self(Box::new(f))
+    }
+}
+
+pub(crate) enum Callback {
+    Read(CbFn<io::Result<usize>>),
+    Write(CbFn<io::Result<usize>>),
+    Flush(CbFn<io::Result<()>>),
+    Close(CbFn<()>),
+    BindStream(CbFn<io::Result<Fd>>),
+    AcceptStream(CbFn<io::Result<Fd>>),
+    ConnectStream(CbFn<io::Result<Fd>>),
+    InsecureTime(CbFn<u64>),
+    Alloc(CbFn<io::Result<*mut u8>>),
+    Free(CbFn<()>),
+}
+
+impl Callback {
+    pub(crate) fn call(self, ret: Return) {
+        use Callback::*;
+        match self {
+            Read(cb) => {
+                let x: (SgxResult, usize) = ReturnValue::from_registers("read", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            Write(cb) => {
+                let x: (SgxResult, usize) = ReturnValue::from_registers("write", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            Flush(cb) => {
+                let x: SgxResult = ReturnValue::from_registers("flush", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            Close(cb) => {
+                assert_eq!((ret.0, ret.1), (0, 0));
+                cb.call(());
+            }
+            BindStream(cb) => {
+                let x: (SgxResult, Fd) = ReturnValue::from_registers("bind_stream", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            AcceptStream(cb) => {
+                let x: (SgxResult, Fd) = ReturnValue::from_registers("accept_stream", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            ConnectStream(cb) => {
+                let x: (SgxResult, Fd) = ReturnValue::from_registers("connect_stream", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            InsecureTime(cb) => {
+                let x: u64 = ReturnValue::from_registers("insecure_time", (ret.0, ret.1));
+                cb.call(x);
+            }
+            Alloc(cb) => {
+                let x: (SgxResult, *mut u8) = ReturnValue::from_registers("alloc", (ret.0, ret.1));
+                let x = x.from_sgx_result();
+                cb.call(x);
+            }
+            Free(cb) => {
+                assert_eq!((ret.0, ret.1), (0, 0));
+                cb.call(());
+            }
+        }
+    }
+}
diff --git a/async-usercalls/src/duplicated.rs b/async-usercalls/src/duplicated.rs
new file mode 100644
index 00000000..0a39e5a1
--- /dev/null
+++ b/async-usercalls/src/duplicated.rs
@@ -0,0 +1,168 @@
+//! This file contains code duplicated from libstd's sys/sgx.
+use fortanix_sgx_abi::{Error, Result, RESULT_SUCCESS};
+use std::io;
+use std::ptr::NonNull;
+
+fn check_os_error(err: Result) -> i32 {
+    // FIXME: not sure how to make sure all variants of Error are covered
+    if err == Error::NotFound as _
+        || err == Error::PermissionDenied as _
+        || err == Error::ConnectionRefused as _
+        || err == Error::ConnectionReset as _
+        || err == Error::ConnectionAborted as _
+        || err == Error::NotConnected as _
+        || err == Error::AddrInUse as _
+        || err == Error::AddrNotAvailable as _
+        || err == Error::BrokenPipe as _
+        || err == Error::AlreadyExists as _
+        || err == Error::WouldBlock as _
+        || err == Error::InvalidInput as _
+        || err == Error::InvalidData as _
+        || err == Error::TimedOut as _
+        || err == Error::WriteZero as _
+        || err == Error::Interrupted as _
+        || err == Error::Other as _
+        || err == Error::UnexpectedEof as _
+        || ((Error::UserRangeStart as _)..=(Error::UserRangeEnd as _)).contains(&err)
+    {
+        err
+    } else {
+        panic!("Usercall: returned invalid error value {}", err)
+    }
+}
+
+pub trait FromSgxResult {
+    type Return;
+
+    fn from_sgx_result(self) -> io::Result<Self::Return>;
+}
+
+impl<T> FromSgxResult for (Result, T) {
+    type Return = T;
+
+    fn from_sgx_result(self) -> io::Result<Self::Return> {
+        if self.0 == RESULT_SUCCESS {
+            Ok(self.1)
+        } else {
+            Err(io::Error::from_raw_os_error(check_os_error(self.0)))
+        }
+    }
+}
+
+impl FromSgxResult for Result {
+    type Return = ();
+
+    fn from_sgx_result(self) -> io::Result<Self::Return> {
+        if self == RESULT_SUCCESS {
+            Ok(())
+        } else {
+            Err(io::Error::from_raw_os_error(check_os_error(self)))
+        }
+    }
+}
+
+type Register = u64;
+
+pub trait RegisterArgument {
+    fn from_register(_: Register) -> Self;
+    fn into_register(self) -> Register;
+}
+
+pub trait ReturnValue {
+    fn from_registers(call: &'static str, regs: (Register, Register)) -> Self;
+}
+
+macro_rules! define_ra {
+    (< $i:ident > $t:ty) => {
+        impl<$i> RegisterArgument for $t {
+            fn from_register(a: Register) -> Self {
+                a as _
+            }
+            fn into_register(self) -> Register {
+                self as _
+            }
+        }
+    };
+    ($i:ty as $t:ty) => {
+        impl RegisterArgument for $t {
+            fn from_register(a: Register) -> Self {
+                a as $i as _
+            }
+            fn into_register(self) -> Register {
+                self as $i as _
+            }
+        }
+    };
+    ($t:ty) => {
+        impl RegisterArgument for $t {
+            fn from_register(a: Register) -> Self {
+                a as _
+            }
+            fn into_register(self) -> Register {
+                self as _
+            }
+        }
+    };
+}
+
+define_ra!(Register);
+define_ra!(i64);
+define_ra!(u32);
+define_ra!(u32 as i32);
+define_ra!(u16);
+define_ra!(u16 as i16);
+define_ra!(u8);
+define_ra!(u8 as i8);
+define_ra!(usize);
+define_ra!(usize as isize);
+define_ra!(<T> *const T);
+define_ra!(<T> *mut T);
+
+impl RegisterArgument for bool {
+    fn from_register(a: Register) -> bool {
+        if a != 0 {
+            true
+        } else {
+            false
+        }
+    }
+    fn into_register(self) -> Register {
+        self as _
+    }
+}
+
+impl<T> RegisterArgument for Option<NonNull<T>> {
+    fn from_register(a: Register) -> Option<NonNull<T>> {
+        NonNull::new(a as _)
+    }
+    fn into_register(self) -> Register {
+        self.map_or(0 as _, NonNull::as_ptr) as _
+    }
+}
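+
+// Editorial example (not part of the original patch): a `read` usercall
+// returns `(Result, usize)` packed into two registers;
+// `ReturnValue::from_registers("read", (r0, r1))` decodes r0 as the `Result`
+// and r1 as the byte count, after which `from_sgx_result()` maps a non-zero
+// `Result` to `io::Error::from_raw_os_error`.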
+
+impl ReturnValue for ! {
+    fn from_registers(call: &'static str, _regs: (Register, Register)) -> Self {
+        panic!("Usercall {}: did not expect to be re-entered", call);
+    }
+}
+
+impl ReturnValue for () {
+    fn from_registers(_call: &'static str, usercall_retval: (Register, Register)) -> Self {
+        assert!(usercall_retval.0 == 0);
+        assert!(usercall_retval.1 == 0);
+        ()
+    }
+}
+
+impl<T: RegisterArgument> ReturnValue for T {
+    fn from_registers(_call: &'static str, usercall_retval: (Register, Register)) -> Self {
+        assert!(usercall_retval.1 == 0);
+        T::from_register(usercall_retval.0)
+    }
+}
+
+impl<T: RegisterArgument, U: RegisterArgument> ReturnValue for (T, U) {
+    fn from_registers(_call: &'static str, regs: (Register, Register)) -> Self {
+        (T::from_register(regs.0), U::from_register(regs.1))
+    }
+}
diff --git a/async-usercalls/src/hacks/async_queues.rs b/async-usercalls/src/hacks/async_queues.rs
new file mode 100644
index 00000000..a325a28f
--- /dev/null
+++ b/async-usercalls/src/hacks/async_queues.rs
@@ -0,0 +1,50 @@
+use super::{Cancel, Return, Usercall};
+use crate::duplicated::ReturnValue;
+use fortanix_sgx_abi::FifoDescriptor;
+use std::num::NonZeroU64;
+use std::os::fortanix_sgx::usercalls;
+use std::os::fortanix_sgx::usercalls::raw;
+use std::{mem, ptr};
+
+// TODO: remove these once support for cancel queue is added in `std::os::fortanix_sgx`
+
+pub unsafe fn async_queues(
+    usercall_queue: *mut FifoDescriptor<Usercall>,
+    return_queue: *mut FifoDescriptor<Return>,
+    cancel_queue: *mut FifoDescriptor<Cancel>,
+) -> raw::Result {
+    ReturnValue::from_registers(
+        "async_queues",
+        raw::do_usercall(
+            NonZeroU64::new(raw::UsercallNrs::async_queues as _).unwrap(),
+            usercall_queue as _,
+            return_queue as _,
+            cancel_queue as _,
+            0,
+            false,
+        ),
+    )
+}
+
+pub unsafe fn alloc_descriptor<T>() -> *mut FifoDescriptor<T> {
+    usercalls::alloc(
+        mem::size_of::<FifoDescriptor<T>>(),
+        mem::align_of::<FifoDescriptor<T>>(),
+    )
+    .expect("failed to allocate userspace memory") as _
+}
+
+pub unsafe fn to_enclave<T>(ptr: *mut FifoDescriptor<T>) -> FifoDescriptor<T> {
+    let mut dest: FifoDescriptor<T> = mem::zeroed();
+    ptr::copy(
+        ptr as *const u8,
+        (&mut dest) as *mut FifoDescriptor<T> as *mut u8,
+        mem::size_of_val(&mut dest),
+    );
+    usercalls::free(
+        ptr as _,
+        mem::size_of::<FifoDescriptor<T>>(),
+        mem::align_of::<FifoDescriptor<T>>(),
+    );
+    dest
+}
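+
+// Editorial sketch of the intended initialization flow (error handling
+// omitted; not part of the original patch):
+//
+//     let uq = alloc_descriptor::<Usercall>();
+//     let rq = alloc_descriptor::<Return>();
+//     let cq = alloc_descriptor::<Cancel>();
+//     let _ = async_queues(uq, rq, cq);    // userspace fills in the descriptors
+//     let usercall_queue = to_enclave(uq); // copies in and frees the userspace copy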
diff --git a/async-usercalls/src/hacks/mod.rs b/async-usercalls/src/hacks/mod.rs
new file mode 100644
index 00000000..f04f3655
--- /dev/null
+++ b/async-usercalls/src/hacks/mod.rs
@@ -0,0 +1,61 @@
+use std::ops::{Deref, DerefMut};
+use std::os::fortanix_sgx::usercalls::alloc::UserSafeSized;
+use std::os::fortanix_sgx::usercalls::raw::ByteBuffer;
+
+mod async_queues;
+mod unsafe_typecasts;
+
+pub use self::async_queues::{alloc_descriptor, async_queues, to_enclave};
+pub use self::unsafe_typecasts::{new_std_listener, new_std_stream};
+
+#[repr(C)]
+#[derive(Copy, Clone, Default)]
+pub struct Usercall(pub u64, pub u64, pub u64, pub u64, pub u64);
+
+unsafe impl UserSafeSized for Usercall {}
+
+#[repr(C)]
+#[derive(Copy, Clone, Default)]
+pub struct Return(pub u64, pub u64);
+
+unsafe impl UserSafeSized for Return {}
+
+#[repr(C)]
+#[derive(Copy, Clone, Default)]
+pub struct Cancel {
+    /// Reserved for future use.
+    pub reserved: u64,
+}
+
+unsafe impl UserSafeSized for Cancel {}
+
+// Interim solution until we mark the target types appropriately
+pub(crate) struct MakeSend<T>(T);
+
+impl<T> MakeSend<T> {
+    pub fn new(t: T) -> Self {
+        Self(t)
+    }
+
+    #[allow(unused)]
+    pub fn into_inner(self) -> T {
+        self.0
+    }
+}
+
+impl<T> Deref for MakeSend<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<T> DerefMut for MakeSend<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+unsafe impl Send for MakeSend<ByteBuffer> {}
+unsafe impl Send for MakeSend<crate::alloc::User<ByteBuffer>> {}
diff --git a/async-usercalls/src/hacks/unsafe_typecasts.rs b/async-usercalls/src/hacks/unsafe_typecasts.rs
new file mode 100644
index 00000000..1e3d67c5
--- /dev/null
+++ b/async-usercalls/src/hacks/unsafe_typecasts.rs
@@ -0,0 +1,95 @@
+//! The incredibly unsafe code in this module allows us to create
+//! `std::net::TcpStream` and `std::net::TcpListener` types from their raw
+//! components in SGX.
+//!
+//! This is obviously very unsafe and not maintainable and is only intended as
+//! an interim solution until we add similar functionality as extension traits
+//! in `std::os::fortanix_sgx`.
+use fortanix_sgx_abi::Fd;
+
+mod sgx {
+    use fortanix_sgx_abi::Fd;
+    use std::sync::Arc;
+
+    #[derive(Debug)]
+    pub struct FileDesc {
+        fd: Fd,
+    }
+
+    #[derive(Debug, Clone)]
+    pub struct Socket {
+        inner: Arc<FileDesc>,
+        local_addr: Option<String>,
+    }
+
+    #[derive(Clone)]
+    pub struct TcpStream {
+        inner: Socket,
+        peer_addr: Option<String>,
+    }
+
+    impl TcpStream {
+        pub fn new(fd: Fd, local_addr: Option<String>, peer_addr: Option<String>) -> TcpStream {
+            TcpStream {
+                inner: Socket {
+                    inner: Arc::new(FileDesc { fd }),
+                    local_addr,
+                },
+                peer_addr,
+            }
+        }
+    }
+
+    #[derive(Clone)]
+    pub struct TcpListener {
+        inner: Socket,
+    }
+
+    impl TcpListener {
+        pub fn new(fd: Fd, local_addr: Option<String>) -> TcpListener {
+            TcpListener {
+                inner: Socket {
+                    inner: Arc::new(FileDesc { fd }),
+                    local_addr,
+                },
+            }
+        }
+    }
+}
+
+struct TcpStream(self::sgx::TcpStream);
+struct TcpListener(self::sgx::TcpListener);
+
+pub unsafe fn new_std_stream(fd: Fd, local_addr: Option<String>, peer_addr: Option<String>) -> std::net::TcpStream {
+    let stream = TcpStream(sgx::TcpStream::new(fd, local_addr, peer_addr));
+    std::mem::transmute(stream)
+}
+
+pub unsafe fn new_std_listener(fd: Fd, local_addr: Option<String>) -> std::net::TcpListener {
+    let listener = TcpListener(sgx::TcpListener::new(fd, local_addr));
+    std::mem::transmute(listener)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::mem;
+    use std::os::fortanix_sgx::io::AsRawFd;
+
+    #[test]
+    fn sanity_check() {
+        let fd = 42;
+        let local = "1.2.3.4:1234";
+        let peer = "5.6.7.8:443";
+        let stream = unsafe { new_std_stream(fd, Some(local.to_owned()), Some(peer.to_owned())) };
+        assert_eq!(stream.as_raw_fd(), fd);
+        assert_eq!(stream.local_addr().unwrap().to_string(), local);
+        assert_eq!(stream.peer_addr().unwrap().to_string(), peer);
+        mem::forget(stream); // not a real stream...
+
+        let listener = unsafe { new_std_listener(fd, Some(local.to_owned())) };
+        assert_eq!(listener.as_raw_fd(), fd);
+        assert_eq!(listener.local_addr().unwrap().to_string(), local);
+        mem::forget(listener); // not a real listener...
+ } +} diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs new file mode 100644 index 00000000..45c37781 --- /dev/null +++ b/async-usercalls/src/lib.rs @@ -0,0 +1,165 @@ +#![feature(sgx_platform)] +#![feature(never_type)] +#![cfg_attr(test, feature(unboxed_closures))] +#![cfg_attr(test, feature(fn_traits))] + +use crossbeam_channel as mpmc; +use ipc_queue::Identified; +use std::collections::HashMap; +use std::os::fortanix_sgx::usercalls::raw::UsercallNrs; +use std::panic; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread::{self, JoinHandle}; + +mod alloc; +mod batch_drop; +mod callback; +mod duplicated; +mod hacks; +mod provider_api; +mod provider_core; +mod queues; +mod raw; +#[cfg(test)] +mod tests; + +pub use self::alloc::{alloc_buf, alloc_byte_buffer, ReadBuffer, User, UserBuf, UserSafeExt, WriteBuffer}; +pub use self::batch_drop::batch_drop; +pub use self::callback::CbFn; +pub use self::raw::RawApi; + +use self::callback::*; +use self::hacks::{Cancel, Return, Usercall}; +use self::provider_core::ProviderCore; +use self::queues::*; + +pub struct CancelHandle<'p> { + c: Identified, + tx: &'p Sender, +} + +impl<'p> CancelHandle<'p> { + pub fn cancel(self) { + self.tx.send(self.c).expect("failed to send cancellation"); + } + + pub(crate) fn new(c: Identified, tx: &'p Sender) -> Self { + CancelHandle { c, tx } + } +} + +/// This type provides a mechanism for submitting usercalls asynchronously. +/// Usercalls are sent to the enclave runner through a queue. The results are +/// retrieved on a dedicated thread. Users are notified of the results through +/// callback functions. +/// +/// Users of this type should take care not to block execution in callbacks. +/// Ceratin usercalls can be cancelled through a handle, but note that it is +/// still possible to receive successful results for cancelled usercalls. +pub struct AsyncUsercallProvider { + core: ProviderCore, + callback_tx: mpmc::Sender<(u64, Callback)>, + shutdown: Arc, + join_handle: Option>, +} + +impl AsyncUsercallProvider { + pub fn new() -> Self { + let (return_tx, return_rx) = mpmc::unbounded(); + let core = ProviderCore::new(Some(return_tx)); + let (callback_tx, callback_rx) = mpmc::unbounded(); + let shutdown = Arc::new(AtomicBool::new(false)); + let callback_handler = CallbackHandler { + return_rx, + callback_rx, + shutdown: Arc::clone(&shutdown), + }; + let join_handle = thread::spawn(move || callback_handler.run()); + Self { + core, + callback_tx, + shutdown, + join_handle: Some(join_handle), + } + } + + #[cfg(test)] + pub(crate) fn provider_id(&self) -> u32 { + self.core.provider_id() + } + + fn send_usercall(&self, usercall: Usercall, callback: Option) -> CancelHandle { + let usercall = self.core.assign_id(usercall); + if let Some(callback) = callback { + self.callback_tx + .send((usercall.id, callback)) + .expect("failed to send callback"); + } + self.core.send_usercall(usercall) + } +} + +impl Drop for AsyncUsercallProvider { + fn drop(&mut self) { + self.shutdown.store(true, Ordering::Release); + // send a usercall to ensure CallbackHandler wakes up and breaks its loop. 
+
+impl Drop for AsyncUsercallProvider {
+    fn drop(&mut self) {
+        self.shutdown.store(true, Ordering::Release);
+        // send a usercall to ensure CallbackHandler wakes up and breaks its loop.
+        let u = Usercall(UsercallNrs::insecure_time as _, 0, 0, 0, 0);
+        self.send_usercall(u, None);
+        let join_handle = self.join_handle.take().unwrap();
+        join_handle.join().unwrap();
+    }
+}
+
+struct CallbackHandler {
+    return_rx: mpmc::Receiver<Identified<Return>>,
+    callback_rx: mpmc::Receiver<(u64, Callback)>,
+    shutdown: Arc<AtomicBool>,
+}
+
+impl CallbackHandler {
+    const BATCH: usize = 1024;
+
+    fn recv_returns(&self) -> ([Identified<Return>; Self::BATCH], usize) {
+        let first = self.return_rx.recv().expect("channel closed unexpectedly");
+        let mut returns = [Identified {
+            id: 0,
+            data: Return(0, 0),
+        }; Self::BATCH];
+        let mut count = 0;
+        for ret in std::iter::once(first).chain(self.return_rx.try_iter().take(Self::BATCH - 1)) {
+            returns[count] = ret;
+            count += 1;
+        }
+        (returns, count)
+    }
+
+    fn run(self) {
+        let mut callbacks = HashMap::with_capacity(256);
+        loop {
+            // block until there are some returns
+            let (returns, count) = self.recv_returns();
+            // receive pending callbacks
+            for (id, callback) in self.callback_rx.try_iter() {
+                callbacks.insert(id, callback);
+            }
+            for ret in &returns[..count] {
+                if let Some(cb) = callbacks.remove(&ret.id) {
+                    let _r = panic::catch_unwind(panic::AssertUnwindSafe(move || {
+                        cb.call(ret.data);
+                    }));
+                    // if let Err(e) = _r {
+                    //     let msg = e
+                    //         .downcast_ref::<String>()
+                    //         .map(String::as_str)
+                    //         .or_else(|| e.downcast_ref::<&str>().map(|&s| s));
+                    //     println!("callback panicked: {:?}", msg);
+                    // }
+                }
+            }
+            if self.shutdown.load(Ordering::Acquire) {
+                break;
+            }
+        }
+    }
+}
diff --git a/async-usercalls/src/provider_api.rs b/async-usercalls/src/provider_api.rs
new file mode 100644
index 00000000..087a22ee
--- /dev/null
+++ b/async-usercalls/src/provider_api.rs
@@ -0,0 +1,274 @@
+use crate::alloc::{alloc_buf, alloc_byte_buffer, User, UserBuf};
+use crate::batch_drop;
+use crate::hacks::{new_std_listener, new_std_stream, MakeSend};
+use crate::raw::RawApi;
+use crate::{AsyncUsercallProvider, CancelHandle};
+use fortanix_sgx_abi::Fd;
+use std::io;
+use std::mem::{self, ManuallyDrop};
+use std::net::{TcpListener, TcpStream};
+use std::os::fortanix_sgx::usercalls::alloc::{User as StdUser, UserRef, UserSafe};
+use std::os::fortanix_sgx::usercalls::raw::ByteBuffer;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+impl AsyncUsercallProvider {
+    /// Sends an asynchronous `read` usercall. `callback` is called when a
+    /// return value is received from userspace. `read_buf` is returned as an
+    /// argument to `callback` along with the result of the `read` usercall.
+    ///
+    /// Returns a handle that can be used to cancel the usercall if desired.
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
+    pub fn read<F>(&self, fd: Fd, read_buf: User<[u8]>, callback: F) -> CancelHandle
+    where
+        F: FnOnce(io::Result<usize>, User<[u8]>) + Send + 'static,
+    {
+        let mut read_buf = ManuallyDrop::new(read_buf);
+        let ptr = read_buf.as_mut_ptr();
+        let len = read_buf.len();
+        let cb = move |res: io::Result<usize>| {
+            let read_buf = ManuallyDrop::into_inner(read_buf);
+            callback(res, read_buf);
+        };
+        unsafe { self.raw_read(fd, ptr, len, Some(cb.into())) }
+    }
+
+    /// Sends an asynchronous `write` usercall. `callback` is called when a
+    /// return value is received from userspace. `write_buf` is returned as an
+    /// argument to `callback` along with the result of the `write` usercall.
+    ///
+    /// Returns a handle that can be used to cancel the usercall if desired.
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
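+    ///
+    /// # Example
+    ///
+    /// Illustrative only (hypothetical `fd`; the callback runs on the
+    /// provider's callback thread):
+    ///
+    /// ```ignore
+    /// let provider = AsyncUsercallProvider::new();
+    /// let mut buf = alloc_buf(5);
+    /// buf[0..5].copy_from_enclave(b"hello");
+    /// provider.write(fd, buf.into(), |res, _buf| {
+    ///     let _n = res.expect("write failed");
+    /// });
+    /// ```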
+    pub fn write<F>(&self, fd: Fd, write_buf: UserBuf, callback: F) -> CancelHandle
+    where
+        F: FnOnce(io::Result<usize>, UserBuf) + Send + 'static,
+    {
+        let mut write_buf = ManuallyDrop::new(write_buf);
+        let ptr = write_buf.as_mut_ptr();
+        let len = write_buf.len();
+        let cb = move |res| {
+            let write_buf = ManuallyDrop::into_inner(write_buf);
+            callback(res, write_buf);
+        };
+        unsafe { self.raw_write(fd, ptr, len, Some(cb.into())) }
+    }
+
+    /// Sends an asynchronous `flush` usercall. `callback` is called when a
+    /// return value is received from userspace.
+    ///
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
+    pub fn flush<F>(&self, fd: Fd, callback: F)
+    where
+        F: FnOnce(io::Result<()>) + Send + 'static,
+    {
+        unsafe {
+            self.raw_flush(fd, Some(callback.into()));
+        }
+    }
+
+    /// Sends an asynchronous `close` usercall. If specified, `callback` is
+    /// called when a return is received from userspace.
+    ///
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
+    pub fn close<F>(&self, fd: Fd, callback: Option<F>)
+    where
+        F: FnOnce() + Send + 'static,
+    {
+        let cb = callback.map(|callback| move |()| callback());
+        unsafe {
+            self.raw_close(fd, cb.map(Into::into));
+        }
+    }
+
+    /// Sends an asynchronous `bind_stream` usercall. `callback` is called when
+    /// a return value is received from userspace.
+    ///
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
+    pub fn bind_stream<F>(&self, addr: &str, callback: F)
+    where
+        F: FnOnce(io::Result<TcpListener>) + Send + 'static,
+    {
+        let mut addr_buf = ManuallyDrop::new(alloc_buf(addr.len()));
+        let mut local_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer()));
+
+        addr_buf[0..addr.len()].copy_from_enclave(addr.as_bytes());
+        let addr_buf_ptr = addr_buf.as_raw_mut_ptr() as *mut u8;
+        let local_addr_ptr = local_addr.as_raw_mut_ptr();
+
+        let cb = move |res: io::Result<Fd>| {
+            let _addr_buf = ManuallyDrop::into_inner(addr_buf);
+            let local_addr = ManuallyDrop::into_inner(local_addr);
+
+            let local = string_from_bytebuffer(&local_addr, "bind_stream", "local_addr");
+            let res = res.map(|fd| unsafe { new_std_listener(fd, Some(local)) });
+            callback(res);
+        };
+        unsafe { self.raw_bind_stream(addr_buf_ptr, addr.len(), local_addr_ptr, Some(cb.into())) }
+    }
+
+    /// Sends an asynchronous `accept_stream` usercall. `callback` is called
+    /// when a return value is received from userspace.
+    ///
+    /// Returns a handle that can be used to cancel the usercall if desired.
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
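+    ///
+    /// # Example
+    ///
+    /// Illustrative only (hypothetical `listener_fd` of an already-bound
+    /// listener):
+    ///
+    /// ```ignore
+    /// let provider = AsyncUsercallProvider::new();
+    /// provider.accept_stream(listener_fd, |res| match res {
+    ///     Ok(stream) => { /* use the std::net::TcpStream */ }
+    ///     Err(e) => eprintln!("accept failed: {}", e),
+    /// });
+    /// ```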
+ pub fn accept_stream(&self, fd: Fd, callback: F) -> CancelHandle + where + F: FnOnce(io::Result) + Send + 'static, + { + let mut local_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + let mut peer_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + + let local_addr_ptr = local_addr.as_raw_mut_ptr(); + let peer_addr_ptr = peer_addr.as_raw_mut_ptr(); + + let cb = move |res: io::Result| { + let local_addr = ManuallyDrop::into_inner(local_addr); + let peer_addr = ManuallyDrop::into_inner(peer_addr); + + let local = string_from_bytebuffer(&*local_addr, "accept_stream", "local_addr"); + let peer = string_from_bytebuffer(&*peer_addr, "accept_stream", "peer_addr"); + let res = res.map(|fd| unsafe { new_std_stream(fd, Some(local), Some(peer)) }); + callback(res); + }; + unsafe { self.raw_accept_stream(fd, local_addr_ptr, peer_addr_ptr, Some(cb.into())) } + } + + /// Sends an asynchronous `connect_stream` usercall. `callback` is called + /// when a return value is received from userspace. + /// + /// Returns a handle that can be used to cancel the usercall if desired. + /// Please refer to the type-level documentation for general notes about + /// callbacks. + pub fn connect_stream(&self, addr: &str, callback: F) -> CancelHandle + where + F: FnOnce(io::Result) + Send + 'static, + { + let mut addr_buf = ManuallyDrop::new(alloc_buf(addr.len())); + let mut local_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + let mut peer_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + + addr_buf[0..addr.len()].copy_from_enclave(addr.as_bytes()); + let addr_buf_ptr = addr_buf.as_raw_mut_ptr() as *mut u8; + let local_addr_ptr = local_addr.as_raw_mut_ptr(); + let peer_addr_ptr = peer_addr.as_raw_mut_ptr(); + + let cb = move |res: io::Result| { + let _addr_buf = ManuallyDrop::into_inner(addr_buf); + let local_addr = ManuallyDrop::into_inner(local_addr); + let peer_addr = ManuallyDrop::into_inner(peer_addr); + + let local = string_from_bytebuffer(&local_addr, "connect_stream", "local_addr"); + let peer = string_from_bytebuffer(&peer_addr, "connect_stream", "peer_addr"); + let res = res.map(|fd| unsafe { new_std_stream(fd, Some(local), Some(peer)) }); + callback(res); + }; + unsafe { self.raw_connect_stream(addr_buf_ptr, addr.len(), local_addr_ptr, peer_addr_ptr, Some(cb.into())) } + } + + /// Sends an asynchronous `alloc` usercall to allocate one instance of `T` + /// in userspace. `callback` is called when a return value is received from + /// userspace. + /// + /// Please refer to the type-level documentation for general notes about + /// callbacks. + pub fn alloc(&self, callback: F) + where + T: UserSafe, + F: FnOnce(io::Result>) + Send + 'static, + { + let cb = move |res: io::Result<*mut u8>| { + let res = res.map(|ptr| unsafe { StdUser::::from_raw(ptr as _) }); + callback(res); + }; + unsafe { + self.raw_alloc(mem::size_of::(), T::align_of(), Some(cb.into())); + } + } + + /// Sends an asynchronous `alloc` usercall to allocate a slice of `T` in + /// userspace with the specified `len`. `callback` is called when a return + /// value is received from userspace. + /// + /// Please refer to the type-level documentation for general notes about + /// callbacks. 
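+    ///
+    /// # Example
+    ///
+    /// A sketch allocating a 1 KiB byte slice in userspace (marked `ignore`;
+    /// the `provider` is assumed to exist):
+    ///
+    /// ```ignore
+    /// provider.alloc_slice::<u8, _>(1024, |res| {
+    ///     let user_buf = res.expect("failed to allocate userspace buffer");
+    ///     assert_eq!(user_buf.len(), 1024);
+    ///     // Dropping `user_buf` frees it with a synchronous usercall;
+    ///     // `batch_drop` amortizes that cost across many buffers.
+    /// });
+    /// ```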
+    pub fn alloc_slice<T, F>(&self, len: usize, callback: F)
+    where
+        [T]: UserSafe,
+        F: FnOnce(io::Result<StdUser<[T]>>) + Send + 'static,
+    {
+        let cb = move |res: io::Result<*mut u8>| {
+            let res = res.map(|ptr| unsafe { StdUser::<[T]>::from_raw_parts(ptr as _, len) });
+            callback(res);
+        };
+        unsafe {
+            self.raw_alloc(len * mem::size_of::<T>(), <[T]>::align_of(), Some(cb.into()));
+        }
+    }
+
+    /// Sends an asynchronous `free` usercall to deallocate the userspace
+    /// buffer `buf`. If specified, `callback` is called when a return is
+    /// received from userspace.
+    ///
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
+    pub fn free<T, F>(&self, mut buf: StdUser<T>, callback: Option<F>)
+    where
+        T: ?Sized + UserSafe,
+        F: FnOnce() + Send + 'static,
+    {
+        let ptr = buf.as_raw_mut_ptr();
+        let cb = callback.map(|callback| move |()| callback());
+        unsafe {
+            self.raw_free(
+                buf.into_raw() as _,
+                mem::size_of_val(&mut *ptr),
+                T::align_of(),
+                cb.map(Into::into),
+            );
+        }
+    }
+
+    /// Sends an asynchronous `insecure_time` usercall. `callback` is called
+    /// when a return value is received from userspace.
+    ///
+    /// Please refer to the type-level documentation for general notes about
+    /// callbacks.
+    pub fn insecure_time<F>(&self, callback: F)
+    where
+        F: FnOnce(SystemTime) + Send + 'static,
+    {
+        let cb = move |nanos_since_epoch| {
+            let t = UNIX_EPOCH + Duration::from_nanos(nanos_since_epoch);
+            callback(t);
+        };
+        unsafe {
+            self.raw_insecure_time(Some(cb.into()));
+        }
+    }
+}
+
+fn string_from_bytebuffer(buf: &UserRef<ByteBuffer>, usercall: &str, arg: &str) -> String {
+    String::from_utf8(copy_user_buffer(buf))
+        .unwrap_or_else(|_| panic!("Usercall {}: expected {} to be valid UTF-8", usercall, arg))
+}
+
+// adapted from libstd sys/sgx/abi/usercalls/alloc.rs
+fn copy_user_buffer(buf: &UserRef<ByteBuffer>) -> Vec<u8> {
+    unsafe {
+        let buf = buf.to_enclave();
+        if buf.len > 0 {
+            let user = StdUser::from_raw_parts(buf.data as _, buf.len);
+            let v = user.to_enclave();
+            batch_drop(user);
+            v
+        } else {
+            // Mustn't look at `data` or call `free` if `len` is `0`.
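+            // A zero-length `ByteBuffer` has no userspace allocation behind it.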
+ Vec::new() + } + } +} diff --git a/async-usercalls/src/provider_core.rs b/async-usercalls/src/provider_core.rs new file mode 100644 index 00000000..6d0025a7 --- /dev/null +++ b/async-usercalls/src/provider_core.rs @@ -0,0 +1,69 @@ +use crate::hacks::{Cancel, Return, Usercall}; +use crate::queues::*; +use crate::CancelHandle; +use crossbeam_channel as mpmc; +use ipc_queue::Identified; +use std::sync::atomic::{AtomicU32, Ordering}; + +pub(crate) struct ProviderCore { + usercall_tx: Sender, + cancel_tx: Sender, + provider_id: u32, + next_id: AtomicU32, +} + +impl ProviderCore { + pub fn new(return_tx: Option>>) -> ProviderCore { + let (usercall_tx, cancel_tx, provider_id) = PROVIDERS.new_provider(return_tx); + ProviderCore { + usercall_tx, + cancel_tx, + provider_id, + next_id: AtomicU32::new(1), + } + } + + #[cfg(test)] + pub fn provider_id(&self) -> u32 { + self.provider_id + } + + fn next_id(&self) -> u32 { + let id = self.next_id.fetch_add(1, Ordering::Relaxed); + match id { + 0 => self.next_id(), + _ => id, + } + } + + pub fn assign_id(&self, usercall: Usercall) -> Identified { + let id = self.next_id(); + Identified { + id: ((self.provider_id as u64) << 32) | id as u64, + data: usercall, + } + } + + pub fn send_usercall(&self, usercall: Identified) -> CancelHandle { + assert!(usercall.id != 0); + let cancel = Identified { + id: usercall.id, + data: Cancel { + reserved: 0, + }, + }; + self.usercall_tx.send(usercall).expect("failed to send async usercall"); + CancelHandle::new(cancel, &self.cancel_tx) + } + + // returns the number of usercalls successfully sent. + pub fn try_send_multiple_usercalls(&self, usercalls: &[Identified]) -> usize { + self.usercall_tx.try_send_multiple(usercalls).unwrap_or(0) + } +} + +impl Drop for ProviderCore { + fn drop(&mut self) { + PROVIDERS.remove_provider(self.provider_id); + } +} diff --git a/async-usercalls/src/queues.rs b/async-usercalls/src/queues.rs new file mode 100644 index 00000000..18c9eade --- /dev/null +++ b/async-usercalls/src/queues.rs @@ -0,0 +1,188 @@ +use crate::hacks::{alloc_descriptor, async_queues, to_enclave, Cancel, Return, Usercall}; +use crossbeam_channel as mpmc; +use fortanix_sgx_abi::{EV_CANCELQ_NOT_FULL, EV_RETURNQ_NOT_EMPTY, EV_USERCALLQ_NOT_FULL}; +use ipc_queue::{self, Identified, QueueEvent, RecvError, SynchronizationError, Synchronizer}; +use lazy_static::lazy_static; +use std::os::fortanix_sgx::usercalls::raw; +use std::sync::{Arc, Mutex}; +use std::{io, iter, thread}; + +pub(crate) type Sender = ipc_queue::Sender; +pub(crate) type Receiver = ipc_queue::Receiver; + +pub(crate) struct Providers { + usercall_queue_tx: Sender, + cancel_queue_tx: Sender, + provider_map: Arc>>>>>, +} + +impl Providers { + pub(crate) fn new_provider( + &self, + return_tx: Option>>, + ) -> (Sender, Sender, u32) { + let id = self.provider_map.lock().unwrap().insert(return_tx); + let usercall_queue_tx = self.usercall_queue_tx.clone(); + let cancel_queue_tx = self.cancel_queue_tx.clone(); + (usercall_queue_tx, cancel_queue_tx, id) + } + + pub(crate) fn remove_provider(&self, id: u32) { + let entry = self.provider_map.lock().unwrap().remove(id); + assert!(entry.is_some()); + } +} + +lazy_static! 
{
+    pub(crate) static ref PROVIDERS: Providers = {
+        let (utx, ctx, rx) = init_async_queues().expect("Failed to initialize async queues");
+        let provider_map = Arc::new(Mutex::new(Map::new()));
+        let return_handler = ReturnHandler {
+            return_queue_rx: rx,
+            provider_map: Arc::clone(&provider_map),
+        };
+        thread::spawn(move || return_handler.run());
+        Providers {
+            usercall_queue_tx: utx,
+            cancel_queue_tx: ctx,
+            provider_map,
+        }
+    };
+}
+
+fn init_async_queues() -> io::Result<(Sender<Usercall>, Sender<Cancel>, Receiver<Return>)> {
+    // FIXME: this is just a hack. Replace these with `User::<FifoDescriptor<T>>::uninitialized().into_raw()`
+    let usercall_q = unsafe { alloc_descriptor::<Usercall>() };
+    let cancel_q = unsafe { alloc_descriptor::<Cancel>() };
+    let return_q = unsafe { alloc_descriptor::<Return>() };
+
+    let r = unsafe { async_queues(usercall_q, return_q, cancel_q) };
+    if r != 0 {
+        return Err(io::Error::from_raw_os_error(r));
+    }
+
+    // FIXME: this is another hack, replace with `unsafe { User::<FifoDescriptor<T>>::from_raw(q) }.to_enclave()`
+    let usercall_queue = unsafe { to_enclave(usercall_q) };
+    let cancel_queue = unsafe { to_enclave(cancel_q) };
+    let return_queue = unsafe { to_enclave(return_q) };
+
+    let utx = unsafe { Sender::from_descriptor(usercall_queue, QueueSynchronizer { queue: Queue::Usercall }) };
+    let ctx = unsafe { Sender::from_descriptor(cancel_queue, QueueSynchronizer { queue: Queue::Cancel }) };
+    let rx = unsafe { Receiver::from_descriptor(return_queue, QueueSynchronizer { queue: Queue::Return }) };
+    Ok((utx, ctx, rx))
+}
+
+struct ReturnHandler {
+    return_queue_rx: Receiver<Return>,
+    provider_map: Arc<Mutex<Map<Option<mpmc::Sender<Identified<Return>>>>>>,
+}
+
+impl ReturnHandler {
+    const N: usize = 1024;
+
+    fn send(&self, returns: &[Identified<Return>]) {
+        // This should hold the lock only for a short amount of time
+        // since mpmc::Sender::send() will not block (unbounded channel).
+        // Also note that the lock is uncontested most of the time, so
+        // taking the lock should be fast.
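+        // The provider id lives in the high 32 bits of each usercall id
+        // (see `ProviderCore::assign_id`); use it to route each return to
+        // the channel of the provider that sent the usercall.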
+ let provider_map = self.provider_map.lock().unwrap(); + for ret in returns { + let provider_id = (ret.id >> 32) as u32; + if let Some(sender) = provider_map.get(provider_id).and_then(|entry| entry.as_ref()) { + let _ = sender.send(*ret); + } + } + } + + fn run(self) { + const DEFAULT_RETURN: Identified = Identified { + id: 0, + data: Return(0, 0), + }; + loop { + let mut returns = [DEFAULT_RETURN; Self::N]; + let first = match self.return_queue_rx.recv() { + Ok(ret) => ret, + Err(RecvError::Closed) => break, + }; + let mut count = 0; + for ret in iter::once(first).chain(self.return_queue_rx.try_iter().take(Self::N - 1)) { + assert!(ret.id != 0); + returns[count] = ret; + count += 1; + } + self.send(&returns[..count]); + } + } +} + +#[derive(Clone, Copy, Debug)] +enum Queue { + Usercall, + Return, + Cancel, +} + +#[derive(Clone, Debug)] +pub(crate) struct QueueSynchronizer { + queue: Queue, +} + +impl Synchronizer for QueueSynchronizer { + fn wait(&self, event: QueueEvent) -> Result<(), SynchronizationError> { + let ev = match (self.queue, event) { + (Queue::Usercall, QueueEvent::NotEmpty) => panic!("enclave should not recv on usercall queue"), + (Queue::Cancel, QueueEvent::NotEmpty) => panic!("enclave should not recv on cancel queue"), + (Queue::Return, QueueEvent::NotFull) => panic!("enclave should not send on return queue"), + (Queue::Usercall, QueueEvent::NotFull) => EV_USERCALLQ_NOT_FULL, + (Queue::Cancel, QueueEvent::NotFull) => EV_CANCELQ_NOT_FULL, + (Queue::Return, QueueEvent::NotEmpty) => EV_RETURNQ_NOT_EMPTY, + }; + unsafe { + raw::wait(ev, raw::WAIT_INDEFINITE); + } + Ok(()) + } + + fn notify(&self, _event: QueueEvent) { + // any synchronous usercall would do + unsafe { + raw::wait(0, raw::WAIT_NO); + } + } +} + +use self::map::Map; +mod map { + use fnv::FnvHashMap; + + pub struct Map { + map: FnvHashMap, + next_id: u32, + } + + impl Map { + pub fn new() -> Self { + Self { + map: FnvHashMap::with_capacity_and_hasher(16, Default::default()), + next_id: 0, + } + } + + pub fn insert(&mut self, value: T) -> u32 { + let id = self.next_id; + self.next_id += 1; + let old = self.map.insert(id, value); + debug_assert!(old.is_none()); + id + } + + pub fn get(&self, id: u32) -> Option<&T> { + self.map.get(&id) + } + + pub fn remove(&mut self, id: u32) -> Option { + self.map.remove(&id) + } + } +} diff --git a/async-usercalls/src/raw.rs b/async-usercalls/src/raw.rs new file mode 100644 index 00000000..fb2d4fac --- /dev/null +++ b/async-usercalls/src/raw.rs @@ -0,0 +1,155 @@ +use crate::callback::*; +use crate::hacks::Usercall; +use crate::{AsyncUsercallProvider, CancelHandle}; +use fortanix_sgx_abi::Fd; +use std::io; +use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; +use std::os::fortanix_sgx::usercalls::raw::UsercallNrs; + +pub trait RawApi { + unsafe fn raw_read( + &self, + fd: Fd, + buf: *mut u8, + len: usize, + callback: Option>>, + ) -> CancelHandle; + + unsafe fn raw_write( + &self, + fd: Fd, + buf: *const u8, + len: usize, + callback: Option>>, + ) -> CancelHandle; + + unsafe fn raw_flush(&self, fd: Fd, callback: Option>>); + + unsafe fn raw_close(&self, fd: Fd, callback: Option>); + + unsafe fn raw_bind_stream( + &self, + addr: *const u8, + len: usize, + local_addr: *mut ByteBuffer, + callback: Option>>, + ); + + unsafe fn raw_accept_stream( + &self, + fd: Fd, + local_addr: *mut ByteBuffer, + peer_addr: *mut ByteBuffer, + callback: Option>>, + ) -> CancelHandle; + + unsafe fn raw_connect_stream( + &self, + addr: *const u8, + len: usize, + local_addr: *mut ByteBuffer, + 
peer_addr: *mut ByteBuffer, + callback: Option>>, + ) -> CancelHandle; + + unsafe fn raw_insecure_time(&self, callback: Option>); + + unsafe fn raw_alloc(&self, size: usize, alignment: usize, callback: Option>>); + + unsafe fn raw_free(&self, ptr: *mut u8, size: usize, alignment: usize, callback: Option>); +} + +impl RawApi for AsyncUsercallProvider { + unsafe fn raw_read( + &self, + fd: Fd, + buf: *mut u8, + len: usize, + callback: Option>>, + ) -> CancelHandle { + let u = Usercall(UsercallNrs::read as _, fd as _, buf as _, len as _, 0); + self.send_usercall(u, callback.map(|cb| Callback::Read(cb))) + } + + unsafe fn raw_write( + &self, + fd: Fd, + buf: *const u8, + len: usize, + callback: Option>>, + ) -> CancelHandle { + let u = Usercall(UsercallNrs::write as _, fd as _, buf as _, len as _, 0); + self.send_usercall(u, callback.map(|cb| Callback::Write(cb))) + } + + unsafe fn raw_flush(&self, fd: Fd, callback: Option>>) { + let u = Usercall(UsercallNrs::flush as _, fd as _, 0, 0, 0); + self.send_usercall(u, callback.map(|cb| Callback::Flush(cb))); + } + + unsafe fn raw_close(&self, fd: Fd, callback: Option>) { + let u = Usercall(UsercallNrs::close as _, fd as _, 0, 0, 0); + self.send_usercall(u, callback.map(|cb| Callback::Close(cb))); + } + + unsafe fn raw_bind_stream( + &self, + addr: *const u8, + len: usize, + local_addr: *mut ByteBuffer, + callback: Option>>, + ) { + let u = Usercall(UsercallNrs::bind_stream as _, addr as _, len as _, local_addr as _, 0); + self.send_usercall(u, callback.map(|cb| Callback::BindStream(cb))); + } + + unsafe fn raw_accept_stream( + &self, + fd: Fd, + local_addr: *mut ByteBuffer, + peer_addr: *mut ByteBuffer, + callback: Option>>, + ) -> CancelHandle { + let u = Usercall( + UsercallNrs::accept_stream as _, + fd as _, + local_addr as _, + peer_addr as _, + 0, + ); + self.send_usercall(u, callback.map(|cb| Callback::AcceptStream(cb))) + } + + unsafe fn raw_connect_stream( + &self, + addr: *const u8, + len: usize, + local_addr: *mut ByteBuffer, + peer_addr: *mut ByteBuffer, + callback: Option>>, + ) -> CancelHandle { + let u = Usercall( + UsercallNrs::connect_stream as _, + addr as _, + len as _, + local_addr as _, + peer_addr as _, + ); + self.send_usercall(u, callback.map(|cb| Callback::ConnectStream(cb))) + } + + unsafe fn raw_insecure_time(&self, callback: Option>) { + let u = Usercall(UsercallNrs::insecure_time as _, 0, 0, 0, 0); + self.send_usercall(u, callback.map(|cb| Callback::InsecureTime(cb))); + } + + unsafe fn raw_alloc(&self, size: usize, alignment: usize, callback: Option>>) { + let u = Usercall(UsercallNrs::alloc as _, size as _, alignment as _, 0, 0); + self.send_usercall(u, callback.map(|cb| Callback::Alloc(cb))); + } + + unsafe fn raw_free(&self, ptr: *mut u8, size: usize, alignment: usize, callback: Option>) { + let u = Usercall(UsercallNrs::free as _, ptr as _, size as _, alignment as _, 0); + self.send_usercall(u, callback.map(|cb| Callback::Free(cb))); + } +} diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs new file mode 100644 index 00000000..78cb2094 --- /dev/null +++ b/async-usercalls/src/tests.rs @@ -0,0 +1,251 @@ +use super::*; +use crate::hacks::MakeSend; +use crossbeam_channel as mpmc; +use std::io; +use std::net::{TcpListener, TcpStream}; +use std::os::fortanix_sgx::io::AsRawFd; +use std::os::fortanix_sgx::usercalls::alloc::User as StdUser; +use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::thread; +use std::time::{Duration, UNIX_EPOCH}; + +#[test] +fn 
get_time_async_raw() { + fn run(tid: u32, provider: AsyncUsercallProvider) -> (u32, u32, Duration) { + let pid = provider.provider_id(); + const N: usize = 500; + let (tx, rx) = mpmc::bounded(N); + for _ in 0..N { + let tx = tx.clone(); + let cb = move |d| { + let system_time = UNIX_EPOCH + Duration::from_nanos(d); + tx.send(system_time).unwrap(); + }; + unsafe { + provider.raw_insecure_time(Some(cb.into())); + } + } + let mut all = Vec::with_capacity(N); + for _ in 0..N { + all.push(rx.recv().unwrap()); + } + + assert_eq!(all.len(), N); + // The results are returned in arbitrary order + all.sort(); + let t0 = *all.first().unwrap(); + let tn = *all.last().unwrap(); + let total = tn.duration_since(t0).unwrap(); + (tid, pid, total / N as u32) + } + + println!(); + const THREADS: usize = 4; + let mut providers = Vec::with_capacity(THREADS); + for _ in 0..THREADS { + providers.push(AsyncUsercallProvider::new()); + } + let mut handles = Vec::with_capacity(THREADS); + for (i, provider) in providers.into_iter().enumerate() { + handles.push(thread::spawn(move || run(i as u32, provider))); + } + for h in handles { + let res = h.join().unwrap(); + println!("[{}/{}] (Tn - T0) / N = {:?}", res.0, res.1, res.2); + } +} + +#[test] +fn raw_alloc_free() { + let provider = AsyncUsercallProvider::new(); + let ptr: Arc> = Arc::new(AtomicPtr::new(0 as _)); + let ptr2 = Arc::clone(&ptr); + const SIZE: usize = 1024; + const ALIGN: usize = 8; + + let (tx, rx) = mpmc::bounded(1); + let cb_alloc = move |p: io::Result<*mut u8>| { + let p = p.unwrap(); + ptr2.store(p, Ordering::Relaxed); + tx.send(()).unwrap(); + }; + unsafe { + provider.raw_alloc(SIZE, ALIGN, Some(cb_alloc.into())); + } + rx.recv().unwrap(); + let p = ptr.load(Ordering::Relaxed); + assert!(!p.is_null()); + + let (tx, rx) = mpmc::bounded(1); + let cb_free = move |()| { + tx.send(()).unwrap(); + }; + unsafe { + provider.raw_free(p, SIZE, ALIGN, Some(cb_free.into())); + } + rx.recv().unwrap(); +} + +#[test] +fn cancel_accept() { + let provider = Arc::new(AsyncUsercallProvider::new()); + let port = 6688; + let addr = format!("0.0.0.0:{}", port); + let (tx, rx) = mpmc::bounded(1); + provider.bind_stream(&addr, move |res| { + tx.send(res).unwrap(); + }); + let bind_res = rx.recv().unwrap(); + let listener = bind_res.unwrap(); + let fd = listener.as_raw_fd(); + let accept_count = Arc::new(AtomicUsize::new(0)); + let accept_count1 = Arc::clone(&accept_count); + let (tx, rx) = mpmc::bounded(1); + let accept = provider.accept_stream(fd, move |res| { + if let Ok(_) = res { + accept_count1.fetch_add(1, Ordering::Relaxed); + } + tx.send(()).unwrap(); + }); + accept.cancel(); + thread::sleep(Duration::from_millis(10)); + let _ = TcpStream::connect(&addr); + let _ = rx.recv(); + assert_eq!(accept_count.load(Ordering::Relaxed), 0); +} + +#[test] +fn connect() { + let listener = TcpListener::bind("0.0.0.0:0").unwrap(); + let addr = listener.local_addr().unwrap().to_string(); + let provider = AsyncUsercallProvider::new(); + let (tx, rx) = mpmc::bounded(1); + provider.connect_stream(&addr, move |res| { + tx.send(res).unwrap(); + }); + let res = rx.recv().unwrap(); + assert!(res.is_ok()); +} + +#[test] +fn safe_alloc_free() { + let provider = AsyncUsercallProvider::new(); + + const LEN: usize = 64 * 1024; + let (tx, rx) = mpmc::bounded(1); + provider.alloc_slice::(LEN, move |res| { + let buf = res.expect("failed to allocate memory"); + tx.send(MakeSend::new(buf)).unwrap(); + }); + let user_buf = rx.recv().unwrap().into_inner(); + assert_eq!(user_buf.len(), LEN); + + 
let (tx, rx) = mpmc::bounded(1); + let cb = move || { + tx.send(()).unwrap(); + }; + provider.free(user_buf, Some(cb)); + rx.recv().unwrap(); +} + +unsafe impl Send for MakeSend> {} + +#[test] +#[ignore] +fn echo() { + println!(); + let provider = Arc::new(AsyncUsercallProvider::new()); + const ADDR: &'static str = "0.0.0.0:7799"; + let (tx, rx) = mpmc::bounded(1); + provider.bind_stream(ADDR, move |res| { + tx.send(res).unwrap(); + }); + let bind_res = rx.recv().unwrap(); + let listener = bind_res.unwrap(); + println!("bind done: {:?}", listener); + let fd = listener.as_raw_fd(); + let cb = KeepAccepting { + listener, + provider: Arc::clone(&provider), + }; + provider.accept_stream(fd, cb); + thread::sleep(Duration::from_secs(60)); +} + +struct KeepAccepting { + listener: TcpListener, + provider: Arc, +} + +impl FnOnce<(io::Result,)> for KeepAccepting { + type Output = (); + + extern "rust-call" fn call_once(self, args: (io::Result,)) -> Self::Output { + let res = args.0; + println!("accept result: {:?}", res); + if let Ok(stream) = res { + let fd = stream.as_raw_fd(); + let cb = Echo { + stream, + read: true, + provider: self.provider.clone(), + }; + self.provider.read(fd, alloc_buf(Echo::READ_BUF_SIZE), cb); + } + let provider = Arc::clone(&self.provider); + provider.accept_stream(self.listener.as_raw_fd(), self); + } +} + +struct Echo { + stream: TcpStream, + read: bool, + provider: Arc, +} + +impl Echo { + const READ_BUF_SIZE: usize = 1024; + + fn close(self) { + let fd = self.stream.as_raw_fd(); + println!("connection closed, fd = {}", fd); + self.provider.close(fd, None::>); + } +} + +// read callback +impl FnOnce<(io::Result, User<[u8]>)> for Echo { + type Output = (); + + extern "rust-call" fn call_once(mut self, args: (io::Result, User<[u8]>)) -> Self::Output { + let (res, user) = args; + assert!(self.read); + match res { + Ok(len) if len > 0 => { + self.read = false; + let provider = Arc::clone(&self.provider); + provider.write(self.stream.as_raw_fd(), (user, 0..len).into(), self); + } + _ => self.close(), + } + } +} + +// write callback +impl FnOnce<(io::Result, UserBuf)> for Echo { + type Output = (); + + extern "rust-call" fn call_once(mut self, args: (io::Result, UserBuf)) -> Self::Output { + let (res, _) = args; + assert!(!self.read); + match res { + Ok(len) if len > 0 => { + self.read = true; + let provider = Arc::clone(&self.provider); + provider.read(self.stream.as_raw_fd(), alloc_buf(Echo::READ_BUF_SIZE), self); + } + _ => self.close(), + } + } +} diff --git a/async-usercalls/test.sh b/async-usercalls/test.sh new file mode 100755 index 00000000..cdb85673 --- /dev/null +++ b/async-usercalls/test.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run this in parallel with: +# $ cargo test --target x86_64-fortanix-unknown-sgx --release -- --nocapture --ignored echo + +for i in $(seq 1 100); do + echo $i + telnet localhost 7799 < /dev/zero &> /dev/null & + sleep 0.01 +done + +sleep 10s +kill $(jobs -p) +wait From 373a2d2fe5a8ef3377a66223eacb83d9f199e0a6 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Thu, 15 Oct 2020 15:41:36 -0700 Subject: [PATCH 02/22] Poll based interface --- async-usercalls/src/lib.rs | 141 ++++++++++++++++++----------------- async-usercalls/src/tests.rs | 64 +++++++++++++--- 2 files changed, 125 insertions(+), 80 deletions(-) diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index 45c37781..3406b2e2 100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -6,11 +6,9 @@ use crossbeam_channel as mpmc; use 
ipc_queue::Identified; use std::collections::HashMap; -use std::os::fortanix_sgx::usercalls::raw::UsercallNrs; use std::panic; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::thread::{self, JoinHandle}; +use std::sync::Mutex; +use std::time::Duration; mod alloc; mod batch_drop; @@ -51,36 +49,34 @@ impl<'p> CancelHandle<'p> { /// This type provides a mechanism for submitting usercalls asynchronously. /// Usercalls are sent to the enclave runner through a queue. The results are -/// retrieved on a dedicated thread. Users are notified of the results through -/// callback functions. +/// retrieved when `fn poll` is called. Users are notified of the results +/// through callback functions. /// /// Users of this type should take care not to block execution in callbacks. -/// Ceratin usercalls can be cancelled through a handle, but note that it is +/// Certain usercalls can be cancelled through a handle, but note that it is /// still possible to receive successful results for cancelled usercalls. pub struct AsyncUsercallProvider { core: ProviderCore, + return_rx: mpmc::Receiver>, + callbacks: Mutex>, + // This mpmc channel is an optimization so that threads sending usercalls + // don't have to take the lock. callback_tx: mpmc::Sender<(u64, Callback)>, - shutdown: Arc, - join_handle: Option>, + callback_rx: mpmc::Receiver<(u64, Callback)>, } impl AsyncUsercallProvider { pub fn new() -> Self { let (return_tx, return_rx) = mpmc::unbounded(); let core = ProviderCore::new(Some(return_tx)); + let callbacks = Mutex::new(HashMap::new()); let (callback_tx, callback_rx) = mpmc::unbounded(); - let shutdown = Arc::new(AtomicBool::new(false)); - let callback_handler = CallbackHandler { - return_rx, - callback_rx, - shutdown: Arc::clone(&shutdown), - }; - let join_handle = thread::spawn(move || callback_handler.run()); Self { core, + return_rx, + callbacks, callback_tx, - shutdown, - join_handle: Some(join_handle), + callback_rx, } } @@ -98,68 +94,73 @@ impl AsyncUsercallProvider { } self.core.send_usercall(usercall) } -} - -impl Drop for AsyncUsercallProvider { - fn drop(&mut self) { - self.shutdown.store(true, Ordering::Release); - // send a usercall to ensure CallbackHandler wakes up and breaks its loop. 
- let u = Usercall(UsercallNrs::insecure_time as _, 0, 0, 0, 0); - self.send_usercall(u, None); - let join_handle = self.join_handle.take().unwrap(); - join_handle.join().unwrap(); - } -} - -struct CallbackHandler { - return_rx: mpmc::Receiver>, - callback_rx: mpmc::Receiver<(u64, Callback)>, - shutdown: Arc, -} - -impl CallbackHandler { - const BATCH: usize = 1024; - fn recv_returns(&self) -> ([Identified; Self::BATCH], usize) { - let first = self.return_rx.recv().expect("channel closed unexpectedly"); - let mut returns = [Identified { - id: 0, - data: Return(0, 0), - }; Self::BATCH]; + fn recv_returns(&self, timeout: Option, returns: &mut [Identified]) -> usize { + let first = match timeout { + None => self.return_rx.recv().ok(), + Some(timeout) => match self.return_rx.recv_timeout(timeout) { + Ok(val) => Some(val), + Err(mpmc::RecvTimeoutError::Disconnected) => None, + Err(mpmc::RecvTimeoutError::Timeout) => return 0, + }, + } + .expect("return channel closed unexpectedly"); let mut count = 0; - for ret in std::iter::once(first).chain(self.return_rx.try_iter().take(Self::BATCH - 1)) { + for ret in std::iter::once(first).chain(self.return_rx.try_iter().take(returns.len() - 1)) { returns[count] = ret; count += 1; } - (returns, count) + count } - fn run(self) { - let mut callbacks = HashMap::with_capacity(256); - loop { - // block until there are some returns - let (returns, count) = self.recv_returns(); - // receive pending callbacks - for (id, callback) in self.callback_rx.try_iter() { - callbacks.insert(id, callback); - } - for ret in &returns[..count] { - if let Some(cb) = callbacks.remove(&ret.id) { - let _r = panic::catch_unwind(panic::AssertUnwindSafe(move || { - cb.call(ret.data); - })); - // if let Err(e) = _r { - // let msg = e - // .downcast_ref::() - // .map(String::as_str) - // .or_else(|| e.downcast_ref::<&str>().map(|&s| s)); - // println!("callback paniced: {:?}", msg); - // } + /// Poll for returned usercalls and execute their respective callback + /// functions. If `timeout` is `None`, it will block execution until at + /// least one return is received, otherwise it will block until there is a + /// return or timeout is elapsed. Returns the number of executed callbacks. + pub fn poll(&self, timeout: Option) -> usize { + // 1. wait for returns + let mut returns = [Identified { + id: 0, + data: Return(0, 0), + }; 1024]; + let returns = match self.recv_returns(timeout, &mut returns) { + 0 => return 0, + n => &returns[..n], + }; + // 2. try to lock the mutex, if successful, receive all pending callbacks and put them in the hash map + let mut guard = match self.callbacks.try_lock() { + Ok(mut callbacks) => { + for (id, cb) in self.callback_rx.try_iter() { + callbacks.insert(id, cb); } + callbacks } - if self.shutdown.load(Ordering::Acquire) { - break; + _ => self.callbacks.lock().unwrap(), + }; + // 3. remove callbacks for returns received in step 1 from the hash map + let mut ret_callbacks = Vec::with_capacity(returns.len()); + for ret in returns { + let cb = guard.remove(&ret.id); + ret_callbacks.push((ret, cb)); + } + drop(guard); + // 4. 
execute the callbacks without hugging the mutex + let mut count = 0; + for (ret, cb) in ret_callbacks { + if let Some(cb) = cb { + let _r = panic::catch_unwind(panic::AssertUnwindSafe(move || { + cb.call(ret.data); + })); + count += 1; + // if let Err(e) = _r { + // let msg = e + // .downcast_ref::() + // .map(String::as_str) + // .or_else(|| e.downcast_ref::<&str>().map(|&s| s)); + // println!("callback paniced: {:?}", msg); + // } } } + count } } diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs index 78cb2094..2ec544e3 100644 --- a/async-usercalls/src/tests.rs +++ b/async-usercalls/src/tests.rs @@ -3,16 +3,60 @@ use crate::hacks::MakeSend; use crossbeam_channel as mpmc; use std::io; use std::net::{TcpListener, TcpStream}; +use std::ops::Deref; use std::os::fortanix_sgx::io::AsRawFd; use std::os::fortanix_sgx::usercalls::alloc::User as StdUser; -use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; use std::time::{Duration, UNIX_EPOCH}; +struct AutoPollingProvider { + provider: Arc, + shutdown: Arc, + join_handle: Option>, +} + +impl AutoPollingProvider { + fn new() -> Self { + let provider = Arc::new(AsyncUsercallProvider::new()); + let shutdown = Arc::new(AtomicBool::new(false)); + let shutdown1 = shutdown.clone(); + let provider1 = provider.clone(); + let join_handle = Some(thread::spawn(move || loop { + provider1.poll(None); + if shutdown1.load(Ordering::Relaxed) { + break; + } + })); + Self { + provider, + shutdown, + join_handle, + } + } +} + +impl Deref for AutoPollingProvider { + type Target = AsyncUsercallProvider; + + fn deref(&self) -> &Self::Target { + &*self.provider + } +} + +impl Drop for AutoPollingProvider { + fn drop(&mut self) { + self.shutdown.store(true, Ordering::Relaxed); + // send a usercall to ensure thread wakes up + self.provider.insecure_time(|_| {}); + self.join_handle.take().unwrap().join().unwrap(); + } +} + #[test] fn get_time_async_raw() { - fn run(tid: u32, provider: AsyncUsercallProvider) -> (u32, u32, Duration) { + fn run(tid: u32, provider: AutoPollingProvider) -> (u32, u32, Duration) { let pid = provider.provider_id(); const N: usize = 500; let (tx, rx) = mpmc::bounded(N); @@ -44,7 +88,7 @@ fn get_time_async_raw() { const THREADS: usize = 4; let mut providers = Vec::with_capacity(THREADS); for _ in 0..THREADS { - providers.push(AsyncUsercallProvider::new()); + providers.push(AutoPollingProvider::new()); } let mut handles = Vec::with_capacity(THREADS); for (i, provider) in providers.into_iter().enumerate() { @@ -58,7 +102,7 @@ fn get_time_async_raw() { #[test] fn raw_alloc_free() { - let provider = AsyncUsercallProvider::new(); + let provider = AutoPollingProvider::new(); let ptr: Arc> = Arc::new(AtomicPtr::new(0 as _)); let ptr2 = Arc::clone(&ptr); const SIZE: usize = 1024; @@ -89,7 +133,7 @@ fn raw_alloc_free() { #[test] fn cancel_accept() { - let provider = Arc::new(AsyncUsercallProvider::new()); + let provider = AutoPollingProvider::new(); let port = 6688; let addr = format!("0.0.0.0:{}", port); let (tx, rx) = mpmc::bounded(1); @@ -119,7 +163,7 @@ fn cancel_accept() { fn connect() { let listener = TcpListener::bind("0.0.0.0:0").unwrap(); let addr = listener.local_addr().unwrap().to_string(); - let provider = AsyncUsercallProvider::new(); + let provider = AutoPollingProvider::new(); let (tx, rx) = mpmc::bounded(1); provider.connect_stream(&addr, move |res| { tx.send(res).unwrap(); @@ -130,7 +174,7 @@ fn connect() 
{ #[test] fn safe_alloc_free() { - let provider = AsyncUsercallProvider::new(); + let provider = AutoPollingProvider::new(); const LEN: usize = 64 * 1024; let (tx, rx) = mpmc::bounded(1); @@ -155,7 +199,7 @@ unsafe impl Send for MakeSend> {} #[ignore] fn echo() { println!(); - let provider = Arc::new(AsyncUsercallProvider::new()); + let provider = Arc::new(AutoPollingProvider::new()); const ADDR: &'static str = "0.0.0.0:7799"; let (tx, rx) = mpmc::bounded(1); provider.bind_stream(ADDR, move |res| { @@ -175,7 +219,7 @@ fn echo() { struct KeepAccepting { listener: TcpListener, - provider: Arc, + provider: Arc, } impl FnOnce<(io::Result,)> for KeepAccepting { @@ -201,7 +245,7 @@ impl FnOnce<(io::Result,)> for KeepAccepting { struct Echo { stream: TcpStream, read: bool, - provider: Arc, + provider: Arc, } impl Echo { From a75e58933795db7b8b6aa13899df6287d8dc6e87 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Mon, 19 Oct 2020 11:43:43 -0700 Subject: [PATCH 03/22] Poll through separate type --- async-usercalls/src/lib.rs | 29 +++++++++++++++++------------ async-usercalls/src/tests.rs | 9 ++++----- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index 3406b2e2..e32286e5 100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -49,35 +49,30 @@ impl<'p> CancelHandle<'p> { /// This type provides a mechanism for submitting usercalls asynchronously. /// Usercalls are sent to the enclave runner through a queue. The results are -/// retrieved when `fn poll` is called. Users are notified of the results -/// through callback functions. +/// retrieved when `CallbackHandler::poll` is called. Users are notified of the +/// results through callback functions. /// /// Users of this type should take care not to block execution in callbacks. /// Certain usercalls can be cancelled through a handle, but note that it is /// still possible to receive successful results for cancelled usercalls. pub struct AsyncUsercallProvider { core: ProviderCore, - return_rx: mpmc::Receiver>, - callbacks: Mutex>, - // This mpmc channel is an optimization so that threads sending usercalls - // don't have to take the lock. callback_tx: mpmc::Sender<(u64, Callback)>, - callback_rx: mpmc::Receiver<(u64, Callback)>, } impl AsyncUsercallProvider { - pub fn new() -> Self { + pub fn new() -> (Self, CallbackHandler) { let (return_tx, return_rx) = mpmc::unbounded(); let core = ProviderCore::new(Some(return_tx)); let callbacks = Mutex::new(HashMap::new()); let (callback_tx, callback_rx) = mpmc::unbounded(); - Self { - core, + let provider = Self { core, callback_tx }; + let handler = CallbackHandler { return_rx, callbacks, - callback_tx, callback_rx, - } + }; + (provider, handler) } #[cfg(test)] @@ -94,7 +89,17 @@ impl AsyncUsercallProvider { } self.core.send_usercall(usercall) } +} + +pub struct CallbackHandler { + return_rx: mpmc::Receiver>, + callbacks: Mutex>, + // This is used so that threads sending usercalls don't have to take the lock. 
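+    // New callbacks are registered through that channel and drained into
+    // the `callbacks` map the next time `poll` runs.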
+ callback_rx: mpmc::Receiver<(u64, Callback)>, +} +impl CallbackHandler { + #[inline] fn recv_returns(&self, timeout: Option, returns: &mut [Identified]) -> usize { let first = match timeout { None => self.return_rx.recv().ok(), diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs index 2ec544e3..a72d6c7d 100644 --- a/async-usercalls/src/tests.rs +++ b/async-usercalls/src/tests.rs @@ -12,19 +12,18 @@ use std::thread; use std::time::{Duration, UNIX_EPOCH}; struct AutoPollingProvider { - provider: Arc, + provider: AsyncUsercallProvider, shutdown: Arc, join_handle: Option>, } impl AutoPollingProvider { fn new() -> Self { - let provider = Arc::new(AsyncUsercallProvider::new()); + let (provider, handler) = AsyncUsercallProvider::new(); let shutdown = Arc::new(AtomicBool::new(false)); let shutdown1 = shutdown.clone(); - let provider1 = provider.clone(); let join_handle = Some(thread::spawn(move || loop { - provider1.poll(None); + handler.poll(None); if shutdown1.load(Ordering::Relaxed) { break; } @@ -41,7 +40,7 @@ impl Deref for AutoPollingProvider { type Target = AsyncUsercallProvider; fn deref(&self) -> &Self::Target { - &*self.provider + &self.provider } } From 95ec8ae5543a55272364d0e55d9e55b63c885296 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Mon, 19 Oct 2020 12:29:04 -0700 Subject: [PATCH 04/22] Simplify CancelHandle --- async-usercalls/src/lib.rs | 16 ++++++++-------- async-usercalls/src/provider_core.rs | 15 +++++++-------- async-usercalls/src/queues.rs | 18 ++++++++++-------- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index e32286e5..e3a9e5d3 100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -32,18 +32,18 @@ use self::hacks::{Cancel, Return, Usercall}; use self::provider_core::ProviderCore; use self::queues::*; -pub struct CancelHandle<'p> { - c: Identified, - tx: &'p Sender, -} +pub struct CancelHandle(Identified); -impl<'p> CancelHandle<'p> { +impl CancelHandle { pub fn cancel(self) { - self.tx.send(self.c).expect("failed to send cancellation"); + PROVIDERS + .cancel_sender() + .send(self.0) + .expect("failed to send cancellation"); } - pub(crate) fn new(c: Identified, tx: &'p Sender) -> Self { - CancelHandle { c, tx } + pub(crate) fn new(c: Identified) -> Self { + CancelHandle(c) } } diff --git a/async-usercalls/src/provider_core.rs b/async-usercalls/src/provider_core.rs index 6d0025a7..9acb318c 100644 --- a/async-usercalls/src/provider_core.rs +++ b/async-usercalls/src/provider_core.rs @@ -6,18 +6,14 @@ use ipc_queue::Identified; use std::sync::atomic::{AtomicU32, Ordering}; pub(crate) struct ProviderCore { - usercall_tx: Sender, - cancel_tx: Sender, provider_id: u32, next_id: AtomicU32, } impl ProviderCore { pub fn new(return_tx: Option>>) -> ProviderCore { - let (usercall_tx, cancel_tx, provider_id) = PROVIDERS.new_provider(return_tx); + let provider_id = PROVIDERS.new_provider(return_tx); ProviderCore { - usercall_tx, - cancel_tx, provider_id, next_id: AtomicU32::new(1), } @@ -52,13 +48,16 @@ impl ProviderCore { reserved: 0, }, }; - self.usercall_tx.send(usercall).expect("failed to send async usercall"); - CancelHandle::new(cancel, &self.cancel_tx) + PROVIDERS + .usercall_sender() + .send(usercall) + .expect("failed to send async usercall"); + CancelHandle::new(cancel) } // returns the number of usercalls successfully sent. 
pub fn try_send_multiple_usercalls(&self, usercalls: &[Identified]) -> usize { - self.usercall_tx.try_send_multiple(usercalls).unwrap_or(0) + PROVIDERS.usercall_sender().try_send_multiple(usercalls).unwrap_or(0) } } diff --git a/async-usercalls/src/queues.rs b/async-usercalls/src/queues.rs index 18c9eade..75fbf2eb 100644 --- a/async-usercalls/src/queues.rs +++ b/async-usercalls/src/queues.rs @@ -17,20 +17,22 @@ pub(crate) struct Providers { } impl Providers { - pub(crate) fn new_provider( - &self, - return_tx: Option>>, - ) -> (Sender, Sender, u32) { - let id = self.provider_map.lock().unwrap().insert(return_tx); - let usercall_queue_tx = self.usercall_queue_tx.clone(); - let cancel_queue_tx = self.cancel_queue_tx.clone(); - (usercall_queue_tx, cancel_queue_tx, id) + pub(crate) fn new_provider(&self, return_tx: Option>>) -> u32 { + self.provider_map.lock().unwrap().insert(return_tx) } pub(crate) fn remove_provider(&self, id: u32) { let entry = self.provider_map.lock().unwrap().remove(id); assert!(entry.is_some()); } + + pub(crate) fn usercall_sender(&self) -> &Sender { + &self.usercall_queue_tx + } + + pub(crate) fn cancel_sender(&self) -> &Sender { + &self.cancel_queue_tx + } } lazy_static! { From c650f46ede00c5e4ad31d54397dfe476bec4ea64 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Tue, 20 Oct 2020 10:36:26 -0700 Subject: [PATCH 05/22] Move io_bufs out of alloc module --- async-usercalls/src/alloc/mod.rs | 2 - async-usercalls/src/alloc/tests.rs | 60 ---------------------- async-usercalls/src/{alloc => }/io_bufs.rs | 2 +- async-usercalls/src/lib.rs | 4 +- async-usercalls/src/provider_api.rs | 3 +- async-usercalls/src/tests.rs | 59 +++++++++++++++++++++ 6 files changed, 65 insertions(+), 65 deletions(-) rename async-usercalls/src/{alloc => }/io_bufs.rs (99%) diff --git a/async-usercalls/src/alloc/mod.rs b/async-usercalls/src/alloc/mod.rs index ab1085c0..d1a1abff 100644 --- a/async-usercalls/src/alloc/mod.rs +++ b/async-usercalls/src/alloc/mod.rs @@ -3,13 +3,11 @@ use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; mod allocator; mod bitmap; -mod io_bufs; mod slab; #[cfg(test)] mod tests; use self::allocator::{LocalAllocator, SharedAllocator}; -pub use self::io_bufs::{ReadBuffer, UserBuf, WriteBuffer}; pub use self::slab::{User, UserSafeExt}; /// Allocates a slice of bytes in userspace that is at least as large as `size`. 
diff --git a/async-usercalls/src/alloc/tests.rs b/async-usercalls/src/alloc/tests.rs index da4e8b3d..0d9906ab 100644 --- a/async-usercalls/src/alloc/tests.rs +++ b/async-usercalls/src/alloc/tests.rs @@ -1,6 +1,5 @@ use super::allocator::SharedAllocator; use super::bitmap::*; -use super::io_bufs::{ReadBuffer, UserBuf, WriteBuffer}; use super::slab::{BufSlab, Slab, SlabAllocator, User}; use crossbeam_channel as mpmc; use std::collections::HashSet; @@ -262,62 +261,3 @@ fn alloc_buf_size() { let b = super::alloc_buf(8 * 1024); assert_eq!(b.len(), 8 * 1024); } - -#[test] -fn write_buffer_basic() { - const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); - - let buf = vec![0u8; LENGTH]; - assert_eq!(write_buffer.write(&buf), LENGTH); - assert_eq!(write_buffer.write(&buf), 0); - - let chunk = write_buffer.consumable_chunk().unwrap(); - write_buffer.consume(chunk, 200); - assert_eq!(write_buffer.write(&buf), 200); - assert_eq!(write_buffer.write(&buf), 0); -} - -#[test] -#[should_panic] -fn call_consumable_chunk_twice() { - const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); - - let buf = vec![0u8; LENGTH]; - assert_eq!(write_buffer.write(&buf), LENGTH); - assert_eq!(write_buffer.write(&buf), 0); - - let chunk1 = write_buffer.consumable_chunk().unwrap(); - let _ = write_buffer.consumable_chunk().unwrap(); - drop(chunk1); -} - -#[test] -#[should_panic] -fn consume_wrong_buf() { - const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); - - let buf = vec![0u8; LENGTH]; - assert_eq!(write_buffer.write(&buf), LENGTH); - assert_eq!(write_buffer.write(&buf), 0); - - let unrelated_buf: UserBuf = super::alloc_buf(512).into(); - write_buffer.consume(unrelated_buf, 100); -} - -#[test] -fn read_buffer_basic() { - let mut buf = super::alloc_buf(64); - const DATA: &'static [u8] = b"hello"; - buf[0..DATA.len()].copy_from_enclave(DATA); - - let mut read_buffer = ReadBuffer::new(buf, DATA.len()); - assert_eq!(read_buffer.len(), DATA.len()); - assert_eq!(read_buffer.remaining_bytes(), DATA.len()); - let mut buf = [0u8; 8]; - assert_eq!(read_buffer.read(&mut buf), DATA.len()); - assert_eq!(read_buffer.remaining_bytes(), 0); - assert_eq!(&buf, b"hello\0\0\0"); -} diff --git a/async-usercalls/src/alloc/io_bufs.rs b/async-usercalls/src/io_bufs.rs similarity index 99% rename from async-usercalls/src/alloc/io_bufs.rs rename to async-usercalls/src/io_bufs.rs index 3880e763..7eb6b6e0 100644 --- a/async-usercalls/src/alloc/io_bufs.rs +++ b/async-usercalls/src/io_bufs.rs @@ -1,4 +1,4 @@ -use super::slab::User; +use crate::alloc::User; use std::cell::UnsafeCell; use std::cmp; use std::io::IoSlice; diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index e3a9e5d3..ab2b9e01 100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -15,6 +15,7 @@ mod batch_drop; mod callback; mod duplicated; mod hacks; +mod io_bufs; mod provider_api; mod provider_core; mod queues; @@ -22,9 +23,10 @@ mod raw; #[cfg(test)] mod tests; -pub use self::alloc::{alloc_buf, alloc_byte_buffer, ReadBuffer, User, UserBuf, UserSafeExt, WriteBuffer}; +pub use self::alloc::{alloc_buf, alloc_byte_buffer, User, UserSafeExt}; pub use self::batch_drop::batch_drop; pub use self::callback::CbFn; +pub use self::io_bufs::{ReadBuffer, UserBuf, WriteBuffer}; pub use self::raw::RawApi; use self::callback::*; diff --git a/async-usercalls/src/provider_api.rs b/async-usercalls/src/provider_api.rs index 
087a22ee..0dff9f7f 100644 --- a/async-usercalls/src/provider_api.rs +++ b/async-usercalls/src/provider_api.rs @@ -1,6 +1,7 @@ -use crate::alloc::{alloc_buf, alloc_byte_buffer, User, UserBuf}; +use crate::alloc::{alloc_buf, alloc_byte_buffer, User}; use crate::batch_drop; use crate::hacks::{new_std_listener, new_std_stream, MakeSend}; +use crate::io_bufs::UserBuf; use crate::raw::RawApi; use crate::{AsyncUsercallProvider, CancelHandle}; use fortanix_sgx_abi::Fd; diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs index a72d6c7d..406c8244 100644 --- a/async-usercalls/src/tests.rs +++ b/async-usercalls/src/tests.rs @@ -194,6 +194,65 @@ fn safe_alloc_free() { unsafe impl Send for MakeSend> {} +#[test] +fn write_buffer_basic() { + const LENGTH: usize = 1024; + let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); + + let buf = vec![0u8; LENGTH]; + assert_eq!(write_buffer.write(&buf), LENGTH); + assert_eq!(write_buffer.write(&buf), 0); + + let chunk = write_buffer.consumable_chunk().unwrap(); + write_buffer.consume(chunk, 200); + assert_eq!(write_buffer.write(&buf), 200); + assert_eq!(write_buffer.write(&buf), 0); +} + +#[test] +#[should_panic] +fn call_consumable_chunk_twice() { + const LENGTH: usize = 1024; + let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); + + let buf = vec![0u8; LENGTH]; + assert_eq!(write_buffer.write(&buf), LENGTH); + assert_eq!(write_buffer.write(&buf), 0); + + let chunk1 = write_buffer.consumable_chunk().unwrap(); + let _ = write_buffer.consumable_chunk().unwrap(); + drop(chunk1); +} + +#[test] +#[should_panic] +fn consume_wrong_buf() { + const LENGTH: usize = 1024; + let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); + + let buf = vec![0u8; LENGTH]; + assert_eq!(write_buffer.write(&buf), LENGTH); + assert_eq!(write_buffer.write(&buf), 0); + + let unrelated_buf: UserBuf = super::alloc_buf(512).into(); + write_buffer.consume(unrelated_buf, 100); +} + +#[test] +fn read_buffer_basic() { + let mut buf = super::alloc_buf(64); + const DATA: &'static [u8] = b"hello"; + buf[0..DATA.len()].copy_from_enclave(DATA); + + let mut read_buffer = ReadBuffer::new(buf, DATA.len()); + assert_eq!(read_buffer.len(), DATA.len()); + assert_eq!(read_buffer.remaining_bytes(), DATA.len()); + let mut buf = [0u8; 8]; + assert_eq!(read_buffer.read(&mut buf), DATA.len()); + assert_eq!(read_buffer.remaining_bytes(), 0); + assert_eq!(&buf, b"hello\0\0\0"); +} + #[test] #[ignore] fn echo() { From b6a2437c24497c794f5a6100449182fb4665905e Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Tue, 20 Oct 2020 10:52:17 -0700 Subject: [PATCH 06/22] Remove allocator module for now --- async-usercalls/src/alloc/allocator.rs | 145 -------------- async-usercalls/src/alloc/bitmap.rs | 156 --------------- async-usercalls/src/alloc/mod.rs | 67 ------- async-usercalls/src/alloc/slab.rs | 198 ------------------- async-usercalls/src/alloc/tests.rs | 263 ------------------------- async-usercalls/src/hacks/mod.rs | 5 +- async-usercalls/src/io_bufs.rs | 3 +- async-usercalls/src/lib.rs | 2 - async-usercalls/src/provider_api.rs | 33 ++-- async-usercalls/src/tests.rs | 23 ++- 10 files changed, 33 insertions(+), 862 deletions(-) delete mode 100644 async-usercalls/src/alloc/allocator.rs delete mode 100644 async-usercalls/src/alloc/bitmap.rs delete mode 100644 async-usercalls/src/alloc/mod.rs delete mode 100644 async-usercalls/src/alloc/slab.rs delete mode 100644 async-usercalls/src/alloc/tests.rs diff --git a/async-usercalls/src/alloc/allocator.rs 
b/async-usercalls/src/alloc/allocator.rs deleted file mode 100644 index 7c6cef9f..00000000 --- a/async-usercalls/src/alloc/allocator.rs +++ /dev/null @@ -1,145 +0,0 @@ -use super::slab::{BufSlab, Slab, SlabAllocator, User, MAX_COUNT}; -use std::cmp; -use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; - -pub const MIN_BUF_SIZE: usize = 1 << 5; // 32 bytes -pub const MAX_BUF_SIZE: usize = 1 << 16; // 64 KB -pub const NUM_SIZES: usize = 1 + (MAX_BUF_SIZE / MIN_BUF_SIZE).trailing_zeros() as usize; - -pub struct SharedAllocator { - by_size: Vec>, - byte_buffers: Vec>, -} - -unsafe impl Send for SharedAllocator {} -unsafe impl Sync for SharedAllocator {} - -impl SharedAllocator { - pub fn new(buf_counts: [usize; NUM_SIZES], byte_buffer_count: usize) -> Self { - let mut by_size = Vec::with_capacity(NUM_SIZES); - for i in 0..NUM_SIZES { - by_size.push(make_buf_slabs(buf_counts[i], MIN_BUF_SIZE << i)); - } - let byte_buffers = make_byte_buffers(byte_buffer_count); - Self { by_size, byte_buffers } - } - - pub fn alloc_buf(&self, size: usize) -> Option> { - assert!(size > 0); - if size > MAX_BUF_SIZE { - return None; - } - let (_, index) = size_index(size); - self.by_size[index].alloc() - } - - pub fn alloc_byte_buffer(&self) -> Option> { - self.byte_buffers.alloc() - } -} - -pub struct LocalAllocator { - initial_buf_counts: [usize; NUM_SIZES], - initial_byte_buffer_count: usize, - inner: SharedAllocator, -} - -impl LocalAllocator { - pub fn new(initial_buf_counts: [usize; NUM_SIZES], initial_byte_buffer_count: usize) -> Self { - let mut by_size = Vec::with_capacity(NUM_SIZES); - by_size.resize_with(NUM_SIZES, Default::default); - let byte_buffers = Vec::new(); - Self { - initial_buf_counts, - initial_byte_buffer_count, - inner: SharedAllocator { by_size, byte_buffers }, - } - } - - pub fn alloc_buf(&mut self, request_size: usize) -> User<[u8]> { - assert!(request_size > 0); - if request_size > MAX_BUF_SIZE { - // Always allocate very large buffers directly - return User::<[u8]>::uninitialized(request_size); - } - let (size, index) = size_index(request_size); - if let Some(buf) = self.inner.by_size[index].alloc() { - return buf; - } - let slabs = &mut self.inner.by_size[index]; - if slabs.len() >= 8 { - // Keep the number of slabs for each size small. - return User::<[u8]>::uninitialized(request_size); - } - let count = slabs.last().map_or(self.initial_buf_counts[index], |s| s.count() * 2); - // Limit each slab's count for better worst-case performance. - let count = cmp::min(count, MAX_COUNT / 8); - slabs.push(BufSlab::new(count, size)); - slabs.last().unwrap().alloc().expect("fresh slab failed to allocate") - } - - pub fn alloc_byte_buffer(&mut self) -> User { - let bbs = &mut self.inner.byte_buffers; - if let Some(byte_buffer) = bbs.alloc() { - return byte_buffer; - } - if bbs.len() >= 8 { - // Keep the number of slabs small. - return User::::uninitialized(); - } - let count = bbs.last().map_or(self.initial_byte_buffer_count, |s| s.count() * 2); - // Limit each slab's count for better worst-case performance. 
- let count = cmp::min(count, MAX_COUNT / 8); - bbs.push(Slab::new(count)); - bbs.last().unwrap().alloc().expect("fresh slab failed to allocate") - } -} - -fn make_buf_slabs(count: usize, size: usize) -> Vec { - match count { - 0 => Vec::new(), - n if n < 1024 => vec![BufSlab::new(n, size)], - n if n < 4 * 1024 => vec![BufSlab::new(n / 2, size), BufSlab::new(n / 2, size)], - n if n < 32 * 1024 => vec![ - BufSlab::new(n / 4, size), - BufSlab::new(n / 4, size), - BufSlab::new(n / 4, size), - BufSlab::new(n / 4, size), - ], - n => vec![ - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - BufSlab::new(n / 8, size), - ], - } -} - -fn make_byte_buffers(count: usize) -> Vec> { - match count { - 0 => Vec::new(), - n if n < 1024 => vec![Slab::new(n)], - n if n < 4 * 1024 => vec![Slab::new(n / 2), Slab::new(n / 2)], - n if n < 32 * 1024 => vec![Slab::new(n / 4), Slab::new(n / 4), Slab::new(n / 4), Slab::new(n / 4)], - n => vec![ - Slab::new(n / 8), - Slab::new(n / 8), - Slab::new(n / 8), - Slab::new(n / 8), - Slab::new(n / 8), - Slab::new(n / 8), - Slab::new(n / 8), - Slab::new(n / 8), - ], - } -} - -fn size_index(request_size: usize) -> (usize, usize) { - let size = cmp::max(MIN_BUF_SIZE, request_size.next_power_of_two()); - let index = (size / MIN_BUF_SIZE).trailing_zeros() as usize; - (size, index) -} diff --git a/async-usercalls/src/alloc/bitmap.rs b/async-usercalls/src/alloc/bitmap.rs deleted file mode 100644 index 80da1cca..00000000 --- a/async-usercalls/src/alloc/bitmap.rs +++ /dev/null @@ -1,156 +0,0 @@ -use spin::Mutex; -use std::sync::atomic::*; - -pub struct OptionalBitmap(BitmapKind); - -struct LargeBitmap(Mutex); - -struct LargeBitmapInner { - bits: Box<[u64]>, - unset_count: usize, // optimization -} - -enum BitmapKind { - None, - V1(AtomicU8), - V2(AtomicU16), - V3(AtomicU32), - V4(AtomicU64), - V5(LargeBitmap), -} - -impl OptionalBitmap { - pub fn none() -> Self { - Self(BitmapKind::None) - } - - /// `bit_count` must be >= 8 and a power of two - pub fn new(bit_count: usize) -> Self { - Self(match bit_count { - 8 => BitmapKind::V1(AtomicU8::new(0)), - 16 => BitmapKind::V2(AtomicU16::new(0)), - 32 => BitmapKind::V3(AtomicU32::new(0)), - 64 => BitmapKind::V4(AtomicU64::new(0)), - n if n > 0 && n % 64 == 0 => { - let bits = vec![0u64; n / 64].into_boxed_slice(); - BitmapKind::V5(LargeBitmap(Mutex::new(LargeBitmapInner { - bits, - unset_count: bit_count, - }))) - } - _ => panic!("bit_count must be >= 8 and a power of two"), - }) - } - - /// set the bit at given index to 0 and panic if the old value was not 1. - pub fn unset(&self, index: usize) { - match self.0 { - BitmapKind::None => {} - BitmapKind::V1(ref a) => a.unset(index), - BitmapKind::V2(ref b) => b.unset(index), - BitmapKind::V3(ref c) => c.unset(index), - BitmapKind::V4(ref d) => d.unset(index), - BitmapKind::V5(ref e) => e.unset(index), - } - } - - /// return the index of a previously unset bit and set that bit to 1. - pub fn reserve(&self) -> Option { - match self.0 { - BitmapKind::None => None, - BitmapKind::V1(ref a) => a.reserve(), - BitmapKind::V2(ref b) => b.reserve(), - BitmapKind::V3(ref c) => c.reserve(), - BitmapKind::V4(ref d) => d.reserve(), - BitmapKind::V5(ref e) => e.reserve(), - } - } -} - -trait BitmapOps { - fn unset(&self, index: usize); - fn reserve(&self) -> Option; -} - -macro_rules! impl_bitmap_ops { - ( $( $t:ty ),* $(,)? 
) => {$( - impl BitmapOps for $t { - fn unset(&self, index: usize) { - let bit = 1 << index; - let old = self.fetch_and(!bit, Ordering::Release) & bit; - assert!(old != 0); - } - - fn reserve(&self) -> Option { - let initial = self.load(Ordering::Relaxed); - let unset_count = initial.count_zeros(); - let (mut index, mut bit) = match unset_count { - 0 => return None, - _ => (0, 1), - }; - for _ in 0..unset_count { - // find the next unset bit - while bit & initial != 0 { - index += 1; - bit = bit << 1; - } - let old = self.fetch_or(bit, Ordering::Acquire) & bit; - if old == 0 { - return Some(index); - } - index += 1; - bit = bit << 1; - } - None - } - } - )*}; -} - -impl_bitmap_ops!(AtomicU8, AtomicU16, AtomicU32, AtomicU64); - -impl BitmapOps for LargeBitmap { - fn unset(&self, index: usize) { - let mut inner = self.0.lock(); - let array = &mut inner.bits; - assert!(index < array.len() * 64); - let slot = index / 64; - let offset = index % 64; - let element = &mut array[slot]; - - let bit = 1 << offset; - let old = *element & bit; - *element = *element & !bit; - inner.unset_count += 1; - assert!(old != 0); - } - - fn reserve(&self) -> Option { - let mut inner = self.0.lock(); - if inner.unset_count == 0 { - return None; - } - let array = &mut inner.bits; - for slot in 0..array.len() { - if let (Some(offset), val) = reserve_u64(array[slot]) { - array[slot] = val; - inner.unset_count -= 1; - return Some(slot * 64 + offset); - } - } - unreachable!() - } -} - -fn reserve_u64(element: u64) -> (Option, u64) { - let (mut index, mut bit) = match element.count_zeros() { - 0 => return (None, element), - _ => (0, 1), - }; - // find the first unset bit - while bit & element != 0 { - index += 1; - bit = bit << 1; - } - (Some(index), element | bit) -} diff --git a/async-usercalls/src/alloc/mod.rs b/async-usercalls/src/alloc/mod.rs deleted file mode 100644 index d1a1abff..00000000 --- a/async-usercalls/src/alloc/mod.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::cell::RefCell; -use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; - -mod allocator; -mod bitmap; -mod slab; -#[cfg(test)] -mod tests; - -use self::allocator::{LocalAllocator, SharedAllocator}; -pub use self::slab::{User, UserSafeExt}; - -/// Allocates a slice of bytes in userspace that is at least as large as `size`. -pub fn alloc_buf(size: usize) -> User<[u8]> { - if let Some(buf) = SHARED.alloc_buf(size) { - return buf; - } - LOCAL.with(|local| local.borrow_mut().alloc_buf(size)) -} - -/// Allocates a `ByteBuffer` in userspace. -pub fn alloc_byte_buffer() -> User { - if let Some(bb) = SHARED.alloc_byte_buffer() { - return bb; - } - LOCAL.with(|local| local.borrow_mut().alloc_byte_buffer()) -} - -lazy_static::lazy_static! { - static ref SHARED: SharedAllocator = SharedAllocator::new( - [ - 8192, // x 32 bytes - 4096, // x 64 bytes - 2048, // x 128 bytes - 1024, // x 256 bytes - 512, // x 512 bytes - 256, // x 1 KB - 64, // x 2 KB - 32, // x 4 KB - 16, // x 8 KB - 1024, // x 16 KB - 32, // x 32 KB - 16, // x 64 KB - ], - 8192, // x ByteBuffer(s) - ); -} - -std::thread_local! 
{ - static LOCAL: RefCell = RefCell::new(LocalAllocator::new( - [ - 128, // x 32 bytes - 64, // x 64 bytes - 32, // x 128 bytes - 16, // x 256 bytes - 8, // x 512 bytes - 8, // x 1 KB - 8, // x 2 KB - 8, // x 4 KB - 8, // x 8 KB - 8, // x 16 KB - 8, // x 32 KB - 8, // x 64 KB - ], - 64, // x ByteBuffer(s) - )); -} diff --git a/async-usercalls/src/alloc/slab.rs b/async-usercalls/src/alloc/slab.rs deleted file mode 100644 index a9e0a0c4..00000000 --- a/async-usercalls/src/alloc/slab.rs +++ /dev/null @@ -1,198 +0,0 @@ -use super::bitmap::OptionalBitmap; -use std::cell::UnsafeCell; -use std::mem; -use std::ops::{Deref, DerefMut}; -use std::os::fortanix_sgx::usercalls::alloc::{User as StdUser, UserRef, UserSafe, UserSafeSized}; -use std::sync::Arc; - -pub const MIN_COUNT: usize = 8; -pub const MAX_COUNT: usize = 64 * 1024; -pub const MIN_UNIT_LEN: usize = 32; - -pub trait SlabAllocator { - type Output; - - fn alloc(&self) -> Option; - fn count(&self) -> usize; - fn total_size(&self) -> usize; -} - -impl SlabAllocator for Vec { - type Output = A::Output; - - fn alloc(&self) -> Option { - for a in self.iter() { - if let Some(buf) = a.alloc() { - return Some(buf); - } - } - None - } - - fn count(&self) -> usize { - self.iter().map(|a| a.count()).sum() - } - - fn total_size(&self) -> usize { - self.iter().map(|a| a.total_size()).sum() - } -} - -struct Storage { - user: UnsafeCell>, - bitmap: OptionalBitmap, -} - -pub struct BufSlab { - storage: Arc>, - unit_len: usize, -} - -impl BufSlab { - pub fn new(count: usize, unit_len: usize) -> Self { - assert!(count.is_power_of_two() && count >= MIN_COUNT && count <= MAX_COUNT); - assert!(unit_len.is_power_of_two() && unit_len >= MIN_UNIT_LEN); - BufSlab { - storage: Arc::new(Storage { - user: UnsafeCell::new(StdUser::<[u8]>::uninitialized(count * unit_len)), - bitmap: OptionalBitmap::new(count), - }), - unit_len, - } - } -} - -impl SlabAllocator for BufSlab { - type Output = User<[u8]>; - - fn alloc(&self) -> Option { - let index = self.storage.bitmap.reserve()?; - let start = index * self.unit_len; - let end = start + self.unit_len; - let user = unsafe { &mut *self.storage.user.get() }; - let user_ref = &mut user[start..end]; - Some(User { - user_ref, - storage: self.storage.clone(), - index, - }) - } - - fn count(&self) -> usize { - self.total_size() / self.unit_len - } - - fn total_size(&self) -> usize { - let user = unsafe { &*self.storage.user.get() }; - user.len() - } -} - -pub trait UserSafeExt: UserSafe { - type Element: UserSafeSized; -} - -impl UserSafeExt for [T] { - type Element = T; -} - -impl UserSafeExt for T { - type Element = T; -} - -pub struct User { - user_ref: &'static mut UserRef, - storage: Arc>, - index: usize, -} - -unsafe impl Send for User {} - -impl User { - pub fn uninitialized() -> Self { - let storage = Arc::new(Storage { - user: UnsafeCell::new(StdUser::<[T]>::uninitialized(1)), - bitmap: OptionalBitmap::none(), - }); - let user = unsafe { &mut *storage.user.get() }; - let user_ref = &mut user[0]; - Self { - user_ref, - storage, - index: 0, - } - } -} - -impl User<[T]> { - pub fn uninitialized(n: usize) -> Self { - let storage = Arc::new(Storage { - user: UnsafeCell::new(StdUser::<[T]>::uninitialized(n)), - bitmap: OptionalBitmap::none(), - }); - let user = unsafe { &mut *storage.user.get() }; - let user_ref = &mut user[..]; - Self { - user_ref, - storage, - index: 0, - } - } -} - -impl Drop for User { - fn drop(&mut self) { - self.storage.bitmap.unset(self.index); - } -} - -impl Deref for User { - type Target = UserRef; - 
- fn deref(&self) -> &Self::Target { - self.user_ref - } -} - -impl DerefMut for User { - fn deref_mut(&mut self) -> &mut Self::Target { - self.user_ref - } -} - -pub struct Slab(Arc>); - -impl Slab { - pub fn new(count: usize) -> Self { - assert!(count.is_power_of_two() && count >= MIN_COUNT && count <= MAX_COUNT); - Slab(Arc::new(Storage { - user: UnsafeCell::new(StdUser::<[T]>::uninitialized(count)), - bitmap: OptionalBitmap::new(count), - })) - } -} - -impl SlabAllocator for Slab { - type Output = User; - - fn alloc(&self) -> Option { - let index = self.0.bitmap.reserve()?; - let user = unsafe { &mut *self.0.user.get() }; - let user_ref = &mut user[index]; - Some(User { - user_ref, - storage: self.0.clone(), - index, - }) - } - - fn count(&self) -> usize { - let user = unsafe { &*self.0.user.get() }; - user.len() - } - - fn total_size(&self) -> usize { - let user = unsafe { &*self.0.user.get() }; - user.len() * mem::size_of::() - } -} diff --git a/async-usercalls/src/alloc/tests.rs b/async-usercalls/src/alloc/tests.rs deleted file mode 100644 index 0d9906ab..00000000 --- a/async-usercalls/src/alloc/tests.rs +++ /dev/null @@ -1,263 +0,0 @@ -use super::allocator::SharedAllocator; -use super::bitmap::*; -use super::slab::{BufSlab, Slab, SlabAllocator, User}; -use crossbeam_channel as mpmc; -use std::collections::HashSet; -use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; -use std::sync::atomic::*; -use std::sync::Arc; -use std::thread; -use std::time::Instant; - -// Copied from Rust tests (test/ui/mpsc_stress.rs) -struct Barrier { - // Not using mutex/condvar for precision - shared: Arc, - count: usize, -} - -impl Barrier { - fn new(count: usize) -> Vec { - let shared = Arc::new(AtomicUsize::new(0)); - (0..count) - .map(|_| Barrier { - shared: shared.clone(), - count: count, - }) - .collect() - } - - /// Returns when `count` threads enter `wait` - fn wait(self) { - self.shared.fetch_add(1, Ordering::SeqCst); - while self.shared.load(Ordering::SeqCst) != self.count {} - } -} - -#[test] -fn bitmap() { - const BITS: usize = 1024; - let bitmap = OptionalBitmap::new(BITS); - for _ in 0..BITS { - assert!(bitmap.reserve().is_some()); - } - let mut indices = vec![34, 7, 5, 6, 120, 121, 122, 127, 0, 9] - .into_iter() - .collect::>(); - for &i in indices.iter() { - bitmap.unset(i); - } - while let Some(index) = bitmap.reserve() { - assert!(indices.remove(&index)); - } - assert!(indices.is_empty()); -} - -#[test] -fn bitmap_concurrent_use() { - const BITS: usize = 16; - const THREADS: usize = 4; - let bitmap = Arc::new(OptionalBitmap::new(BITS)); - for _ in 0..BITS - THREADS { - bitmap.reserve().unwrap(); - } - let mut handles = Vec::with_capacity(THREADS); - let mut barriers = Barrier::new(THREADS); - let (tx, rx) = mpmc::unbounded(); - - for _ in 0..THREADS { - let bitmap = Arc::clone(&bitmap); - let barrier = barriers.pop().unwrap(); - let tx = tx.clone(); - - handles.push(thread::spawn(move || { - barrier.wait(); - let index = bitmap.reserve().unwrap(); - tx.send(index).unwrap(); - })); - } - drop(tx); - for x in rx.iter() { - bitmap.unset(x); - } - for h in handles { - h.join().unwrap(); - } -} - -#[test] -fn buf_slab() { - const COUNT: usize = 16; - const SIZE: usize = 64; - let buf_slab = BufSlab::new(COUNT, SIZE); - - let bufs = (0..COUNT) - .map(|_| { - let buf = buf_slab.alloc().unwrap(); - assert!(buf.len() == SIZE); - buf - }) - .collect::>(); - - assert!(buf_slab.alloc().is_none()); - drop(bufs); - assert!(buf_slab.alloc().is_some()); -} - -#[test] -fn byte_buffer_slab() { - const 
COUNT: usize = 256; - let slab = Slab::::new(COUNT); - - let bufs = (0..COUNT) - .map(|_| slab.alloc().unwrap()) - .collect::>>(); - - assert!(slab.alloc().is_none()); - drop(bufs); - assert!(slab.alloc().is_some()); -} - -#[test] -fn user_is_send() { - const COUNT: usize = 16; - const SIZE: usize = 1024; - let buf_slab = BufSlab::new(COUNT, SIZE); - - let mut user = buf_slab.alloc().unwrap(); - - let h = thread::spawn(move || { - user[0..5].copy_from_enclave(b"hello"); - }); - - h.join().unwrap(); -} - -fn slab_speed(count: usize) { - let t0 = Instant::now(); - const SIZE: usize = 32; - const N: u32 = 100_000; - let buf_slab = BufSlab::new(count, SIZE); - - let bufs = (0..count - 1).map(|_| buf_slab.alloc().unwrap()).collect::>(); - - let mut x = 0; - for _ in 0..N { - let b = buf_slab.alloc().unwrap(); - x += b.len(); - } - drop(bufs); - drop(buf_slab); - let d = t0.elapsed(); - assert!(x > 0); // prevent the compiler from removing the whole loop above in release mode - println!("count = {} took {:?}", count, d / N); -} - -#[test] -#[ignore] -fn speed_slab() { - println!("\n"); - for i in 3..=16 { - slab_speed(1 << i); - } -} - -#[test] -#[ignore] -fn speed_direct() { - use std::os::fortanix_sgx::usercalls::alloc::User; - - let t0 = Instant::now(); - const SIZE: usize = 32; - const N: u32 = 100_000; - let mut x = 0; - for _ in 0..N { - let b = User::<[u8]>::uninitialized(SIZE); - x += b.len(); - } - let d = t0.elapsed(); - assert!(x > 0); - println!("took {:?}", d / N); -} - -#[test] -fn shared_allocator() { - let a = SharedAllocator::new( - [ - /*32:*/ 2048, /*64:*/ 1024, /*128:*/ 512, /*256:*/ 256, /*512:*/ 128, - /*1K:*/ 64, /*2K:*/ 0, /*4K:*/ 0, /*8K:*/ 0, /*16K:*/ 0, /*32K:*/ 0, - /*64K:*/ 1024, - ], - 1024, - ); - for size in 1..=32 { - let b = a.alloc_buf(size).unwrap(); - assert!(b.len() == 32); - } - for size in 33..=64 { - let b = a.alloc_buf(size).unwrap(); - assert!(b.len() == 64); - } - for &size in &[65, 79, 83, 120, 127, 128] { - let b = a.alloc_buf(size).unwrap(); - assert!(b.len() == 128); - } - for &size in &[129, 199, 210, 250, 255, 256] { - let b = a.alloc_buf(size).unwrap(); - assert!(b.len() == 256); - } - for &size in &[257, 299, 365, 500, 512] { - let b = a.alloc_buf(size).unwrap(); - assert!(b.len() == 512); - } - for &size in &[513, 768, 1023, 1024] { - let b = a.alloc_buf(size).unwrap(); - assert!(b.len() == 1024); - } - for i in 2..=32 { - assert!(a.alloc_buf(i * 1024).is_none()); - } - for i in 33..=64 { - let b = a.alloc_buf(i * 1024).unwrap(); - assert!(b.len() == 64 * 1024); - } -} - -fn alloc_speed(count: usize) { - let t0 = Instant::now(); - const SIZE: usize = 32; - const N: u32 = 100_000; - - let bufs = (0..count - 1).map(|_| super::alloc_buf(SIZE)).collect::>(); - - let mut x = 0; - for _ in 0..N { - let b = super::alloc_buf(SIZE); - x += b.len(); - } - drop(bufs); - let d = t0.elapsed(); - assert!(x > 0); - println!("count = {} took {:?}", count, d / N); -} - -#[test] -#[ignore] -fn speed_overall() { - println!("\n"); - for i in 3..=14 { - alloc_speed(1 << i); - } -} - -#[test] -fn alloc_buf_size() { - let b = super::alloc_buf(32); - assert_eq!(b.len(), 32); - let b = super::alloc_buf(128); - assert_eq!(b.len(), 128); - let b = super::alloc_buf(900); - assert_eq!(b.len(), 1024); - let b = super::alloc_buf(8 * 1024); - assert_eq!(b.len(), 8 * 1024); -} diff --git a/async-usercalls/src/hacks/mod.rs b/async-usercalls/src/hacks/mod.rs index f04f3655..71c0639f 100644 --- a/async-usercalls/src/hacks/mod.rs +++ b/async-usercalls/src/hacks/mod.rs @@ -1,5 
+1,5 @@ use std::ops::{Deref, DerefMut}; -use std::os::fortanix_sgx::usercalls::alloc::UserSafeSized; +use std::os::fortanix_sgx::usercalls::alloc::{User, UserSafeSized}; use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; mod async_queues; @@ -58,4 +58,5 @@ impl DerefMut for MakeSend { } unsafe impl Send for MakeSend {} -unsafe impl Send for MakeSend> {} +unsafe impl Send for MakeSend> {} +unsafe impl Send for MakeSend> {} diff --git a/async-usercalls/src/io_bufs.rs b/async-usercalls/src/io_bufs.rs index 7eb6b6e0..825a7a8a 100644 --- a/async-usercalls/src/io_bufs.rs +++ b/async-usercalls/src/io_bufs.rs @@ -1,9 +1,8 @@ -use crate::alloc::User; use std::cell::UnsafeCell; use std::cmp; use std::io::IoSlice; use std::ops::{Deref, DerefMut, Range}; -use std::os::fortanix_sgx::usercalls::alloc::UserRef; +use std::os::fortanix_sgx::usercalls::alloc::{User, UserRef}; use std::sync::Arc; pub struct UserBuf(UserBufKind); diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index ab2b9e01..0c51d29a 100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -10,7 +10,6 @@ use std::panic; use std::sync::Mutex; use std::time::Duration; -mod alloc; mod batch_drop; mod callback; mod duplicated; @@ -23,7 +22,6 @@ mod raw; #[cfg(test)] mod tests; -pub use self::alloc::{alloc_buf, alloc_byte_buffer, User, UserSafeExt}; pub use self::batch_drop::batch_drop; pub use self::callback::CbFn; pub use self::io_bufs::{ReadBuffer, UserBuf, WriteBuffer}; diff --git a/async-usercalls/src/provider_api.rs b/async-usercalls/src/provider_api.rs index 0dff9f7f..234dbddb 100644 --- a/async-usercalls/src/provider_api.rs +++ b/async-usercalls/src/provider_api.rs @@ -1,4 +1,3 @@ -use crate::alloc::{alloc_buf, alloc_byte_buffer, User}; use crate::batch_drop; use crate::hacks::{new_std_listener, new_std_stream, MakeSend}; use crate::io_bufs::UserBuf; @@ -8,7 +7,7 @@ use fortanix_sgx_abi::Fd; use std::io; use std::mem::{self, ManuallyDrop}; use std::net::{TcpListener, TcpStream}; -use std::os::fortanix_sgx::usercalls::alloc::{User as StdUser, UserRef, UserSafe}; +use std::os::fortanix_sgx::usercalls::alloc::{User, UserRef, UserSafe}; use std::os::fortanix_sgx::usercalls::raw::ByteBuffer; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -24,11 +23,11 @@ impl AsyncUsercallProvider { where F: FnOnce(io::Result, User<[u8]>) + Send + 'static, { - let mut read_buf = ManuallyDrop::new(read_buf); + let mut read_buf = ManuallyDrop::new(MakeSend::new(read_buf)); let ptr = read_buf.as_mut_ptr(); let len = read_buf.len(); let cb = move |res: io::Result| { - let read_buf = ManuallyDrop::into_inner(read_buf); + let read_buf = ManuallyDrop::into_inner(read_buf).into_inner(); callback(res, read_buf); }; unsafe { self.raw_read(fd, ptr, len, Some(cb.into())) } @@ -93,8 +92,8 @@ impl AsyncUsercallProvider { where F: FnOnce(io::Result) + Send + 'static, { - let mut addr_buf = ManuallyDrop::new(alloc_buf(addr.len())); - let mut local_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + let mut addr_buf = ManuallyDrop::new(MakeSend::new(User::<[u8]>::uninitialized(addr.len()))); + let mut local_addr = ManuallyDrop::new(MakeSend::new(User::::uninitialized())); addr_buf[0..addr.len()].copy_from_enclave(addr.as_bytes()); let addr_buf_ptr = addr_buf.as_raw_mut_ptr() as *mut u8; @@ -121,8 +120,8 @@ impl AsyncUsercallProvider { where F: FnOnce(io::Result) + Send + 'static, { - let mut local_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); - let mut peer_addr = 
ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + let mut local_addr = ManuallyDrop::new(MakeSend::new(User::::uninitialized())); + let mut peer_addr = ManuallyDrop::new(MakeSend::new(User::::uninitialized())); let local_addr_ptr = local_addr.as_raw_mut_ptr(); let peer_addr_ptr = peer_addr.as_raw_mut_ptr(); @@ -149,9 +148,9 @@ impl AsyncUsercallProvider { where F: FnOnce(io::Result) + Send + 'static, { - let mut addr_buf = ManuallyDrop::new(alloc_buf(addr.len())); - let mut local_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); - let mut peer_addr = ManuallyDrop::new(MakeSend::new(alloc_byte_buffer())); + let mut addr_buf = ManuallyDrop::new(MakeSend::new(User::<[u8]>::uninitialized(addr.len()))); + let mut local_addr = ManuallyDrop::new(MakeSend::new(User::::uninitialized())); + let mut peer_addr = ManuallyDrop::new(MakeSend::new(User::::uninitialized())); addr_buf[0..addr.len()].copy_from_enclave(addr.as_bytes()); let addr_buf_ptr = addr_buf.as_raw_mut_ptr() as *mut u8; @@ -180,10 +179,10 @@ impl AsyncUsercallProvider { pub fn alloc(&self, callback: F) where T: UserSafe, - F: FnOnce(io::Result>) + Send + 'static, + F: FnOnce(io::Result>) + Send + 'static, { let cb = move |res: io::Result<*mut u8>| { - let res = res.map(|ptr| unsafe { StdUser::::from_raw(ptr as _) }); + let res = res.map(|ptr| unsafe { User::::from_raw(ptr as _) }); callback(res); }; unsafe { @@ -200,10 +199,10 @@ impl AsyncUsercallProvider { pub fn alloc_slice(&self, len: usize, callback: F) where [T]: UserSafe, - F: FnOnce(io::Result>) + Send + 'static, + F: FnOnce(io::Result>) + Send + 'static, { let cb = move |res: io::Result<*mut u8>| { - let res = res.map(|ptr| unsafe { StdUser::<[T]>::from_raw_parts(ptr as _, len) }); + let res = res.map(|ptr| unsafe { User::<[T]>::from_raw_parts(ptr as _, len) }); callback(res); }; unsafe { @@ -217,7 +216,7 @@ impl AsyncUsercallProvider { /// /// Please refer to the type-level documentation for general notes about /// callbacks. 
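    ///
    /// A minimal usage sketch (hypothetical `provider` and `user_buf` values
    /// are assumed; `user_buf` stands in for a `User<[u8]>` previously
    /// returned by `alloc_slice`):
    ///
    /// ```no_run
    /// let (tx, rx) = crossbeam_channel::bounded(1);
    /// provider.free(user_buf, Some(move || tx.send(()).unwrap()));
    /// rx.recv().unwrap(); // returns once userspace has freed the buffer
    /// ```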
- pub fn free(&self, mut buf: StdUser, callback: Option) + pub fn free(&self, mut buf: User, callback: Option) where T: ?Sized + UserSafe, F: FnOnce() + Send + 'static, @@ -263,7 +262,7 @@ fn copy_user_buffer(buf: &UserRef) -> Vec { unsafe { let buf = buf.to_enclave(); if buf.len > 0 { - let user = StdUser::from_raw_parts(buf.data as _, buf.len); + let user = User::from_raw_parts(buf.data as _, buf.len); let v = user.to_enclave(); batch_drop(user); v diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs index 406c8244..2bdc473b 100644 --- a/async-usercalls/src/tests.rs +++ b/async-usercalls/src/tests.rs @@ -5,7 +5,7 @@ use std::io; use std::net::{TcpListener, TcpStream}; use std::ops::Deref; use std::os::fortanix_sgx::io::AsRawFd; -use std::os::fortanix_sgx::usercalls::alloc::User as StdUser; +use std::os::fortanix_sgx::usercalls::alloc::User; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; @@ -192,12 +192,10 @@ fn safe_alloc_free() { rx.recv().unwrap(); } -unsafe impl Send for MakeSend> {} - #[test] fn write_buffer_basic() { const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); + let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); let buf = vec![0u8; LENGTH]; assert_eq!(write_buffer.write(&buf), LENGTH); @@ -213,7 +211,7 @@ fn write_buffer_basic() { #[should_panic] fn call_consumable_chunk_twice() { const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); + let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); let buf = vec![0u8; LENGTH]; assert_eq!(write_buffer.write(&buf), LENGTH); @@ -228,19 +226,19 @@ fn call_consumable_chunk_twice() { #[should_panic] fn consume_wrong_buf() { const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(super::alloc_buf(1024)); + let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); let buf = vec![0u8; LENGTH]; assert_eq!(write_buffer.write(&buf), LENGTH); assert_eq!(write_buffer.write(&buf), 0); - let unrelated_buf: UserBuf = super::alloc_buf(512).into(); + let unrelated_buf: UserBuf = User::<[u8]>::uninitialized(512).into(); write_buffer.consume(unrelated_buf, 100); } #[test] fn read_buffer_basic() { - let mut buf = super::alloc_buf(64); + let mut buf = User::<[u8]>::uninitialized(64); const DATA: &'static [u8] = b"hello"; buf[0..DATA.len()].copy_from_enclave(DATA); @@ -293,7 +291,8 @@ impl FnOnce<(io::Result,)> for KeepAccepting { read: true, provider: self.provider.clone(), }; - self.provider.read(fd, alloc_buf(Echo::READ_BUF_SIZE), cb); + self.provider + .read(fd, User::<[u8]>::uninitialized(Echo::READ_BUF_SIZE), cb); } let provider = Arc::clone(&self.provider); provider.accept_stream(self.listener.as_raw_fd(), self); @@ -345,7 +344,11 @@ impl FnOnce<(io::Result, UserBuf)> for Echo { Ok(len) if len > 0 => { self.read = true; let provider = Arc::clone(&self.provider); - provider.read(self.stream.as_raw_fd(), alloc_buf(Echo::READ_BUF_SIZE), self); + provider.read( + self.stream.as_raw_fd(), + User::<[u8]>::uninitialized(Echo::READ_BUF_SIZE), + self, + ); } _ => self.close(), } From c038129912184bf5ab68a61cc3e655a7e425b318 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Tue, 10 Nov 2020 11:06:18 -0800 Subject: [PATCH 07/22] Remove reserved field --- async-usercalls/src/hacks/mod.rs | 5 +---- async-usercalls/src/provider_core.rs | 4 +--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git 
a/async-usercalls/src/hacks/mod.rs b/async-usercalls/src/hacks/mod.rs index 71c0639f..9011c63e 100644 --- a/async-usercalls/src/hacks/mod.rs +++ b/async-usercalls/src/hacks/mod.rs @@ -22,10 +22,7 @@ unsafe impl UserSafeSized for Return {} #[repr(C)] #[derive(Copy, Clone, Default)] -pub struct Cancel { - /// Reserved for future use. - pub reserved: u64, -} +pub struct Cancel; unsafe impl UserSafeSized for Cancel {} diff --git a/async-usercalls/src/provider_core.rs b/async-usercalls/src/provider_core.rs index 9acb318c..5b027c16 100644 --- a/async-usercalls/src/provider_core.rs +++ b/async-usercalls/src/provider_core.rs @@ -44,9 +44,7 @@ impl ProviderCore { assert!(usercall.id != 0); let cancel = Identified { id: usercall.id, - data: Cancel { - reserved: 0, - }, + data: Cancel, }; PROVIDERS .usercall_sender() From 6eb6a3483e1adb33c65a5b40e5a74b96ebbfe270 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Fri, 20 Nov 2020 15:29:45 -0800 Subject: [PATCH 08/22] Add CallbackHandlerWaker --- async-usercalls/src/lib.rs | 47 ++++++++++++++++++++++++++++++++---- async-usercalls/src/tests.rs | 19 +++++++++++++++ 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index 0c51d29a..ce58b8ad 100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -67,10 +67,12 @@ impl AsyncUsercallProvider { let callbacks = Mutex::new(HashMap::new()); let (callback_tx, callback_rx) = mpmc::unbounded(); let provider = Self { core, callback_tx }; + let waker = CallbackHandlerWaker::new(); let handler = CallbackHandler { return_rx, callbacks, callback_rx, + waker, }; (provider, handler) } @@ -91,22 +93,56 @@ impl AsyncUsercallProvider { } } +#[derive(Clone)] +pub struct CallbackHandlerWaker { + rx: mpmc::Receiver<()>, + tx: mpmc::Sender<()>, +} + +impl CallbackHandlerWaker { + fn new() -> Self { + let (tx, rx) = mpmc::bounded(1); + Self { tx, rx } + } + + /// Interrupts the currently running or a future call to the related + /// CallbackHandler's `poll()`. + pub fn wake(&self) { + let _ = self.tx.try_send(()); + } + + /// Clears the effect of a previous call to `self.wake()` that is not yet + /// observed by `CallbackHandler::poll()`. + pub fn clear(&self) { + let _ = self.rx.try_recv(); + } +} + pub struct CallbackHandler { return_rx: mpmc::Receiver>, callbacks: Mutex>, // This is used so that threads sending usercalls don't have to take the lock. callback_rx: mpmc::Receiver<(u64, Callback)>, + waker: CallbackHandlerWaker, } impl CallbackHandler { + // Returns an object that can be used to interrupt a blocked `self.poll()`. + pub fn waker(&self) -> CallbackHandlerWaker { + self.waker.clone() + } + #[inline] fn recv_returns(&self, timeout: Option, returns: &mut [Identified]) -> usize { let first = match timeout { - None => self.return_rx.recv().ok(), - Some(timeout) => match self.return_rx.recv_timeout(timeout) { - Ok(val) => Some(val), - Err(mpmc::RecvTimeoutError::Disconnected) => None, - Err(mpmc::RecvTimeoutError::Timeout) => return 0, + None => mpmc::select! { + recv(self.return_rx) -> res => res.ok(), + recv(self.waker.rx) -> _res => return 0, + }, + Some(timeout) => mpmc::select! { + recv(self.return_rx) -> res => res.ok(), + recv(self.waker.rx) -> _res => return 0, + default(timeout) => return 0, }, } .expect("return channel closed unexpectedly"); @@ -122,6 +158,7 @@ impl CallbackHandler { /// functions. 
If `timeout` is `None`, it will block execution until at /// least one return is received, otherwise it will block until there is a /// return or timeout is elapsed. Returns the number of executed callbacks. + /// This can be interrupted using `CallbackHandlerWaker::wake()`. pub fn poll(&self, timeout: Option) -> usize { // 1. wait for returns let mut returns = [Identified { diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs index 2bdc473b..ff838c48 100644 --- a/async-usercalls/src/tests.rs +++ b/async-usercalls/src/tests.rs @@ -251,6 +251,25 @@ fn read_buffer_basic() { assert_eq!(&buf, b"hello\0\0\0"); } +#[test] +fn callback_handler_waker() { + let (_provider, handler) = AsyncUsercallProvider::new(); + let waker = handler.waker(); + let (tx, rx) = mpmc::bounded(1); + let h = thread::spawn(move || { + let n1 = handler.poll(None); + tx.send(()).unwrap(); + let n2 = handler.poll(Some(Duration::from_secs(3))); + tx.send(()).unwrap(); + n1 + n2 + }); + for _ in 0..2 { + waker.wake(); + rx.recv().unwrap(); + } + assert_eq!(h.join().unwrap(), 0); +} + #[test] #[ignore] fn echo() { From 7668388a517f09d63422189c038fe8a98fbf9e8b Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Wed, 2 Dec 2020 15:07:21 -0800 Subject: [PATCH 09/22] Minor fixes --- Cargo.lock | 7 ------- async-usercalls/Cargo.toml | 1 - async-usercalls/src/batch_drop.rs | 10 +++++----- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c4b08d7..1580755e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,7 +76,6 @@ dependencies = [ "fortanix-sgx-abi", "ipc-queue", "lazy_static 1.4.0", - "spin", ] [[package]] @@ -2343,12 +2342,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "static_assertions" version = "1.1.0" diff --git a/async-usercalls/Cargo.toml b/async-usercalls/Cargo.toml index 64c1e0dc..c0cd25a0 100644 --- a/async-usercalls/Cargo.toml +++ b/async-usercalls/Cargo.toml @@ -23,7 +23,6 @@ fortanix-sgx-abi = { version = "0.4", path = "../fortanix-sgx-abi" } # External dependencies lazy_static = "1.4.0" # MIT/Apache-2.0 crossbeam-channel = "0.4" # MIT/Apache-2.0 -spin = "0.5" # MIT/Apache-2.0 fnv = "1.0" # MIT/Apache-2.0 # For cargo test --target x86_64-fortanix-unknown-sgx diff --git a/async-usercalls/src/batch_drop.rs b/async-usercalls/src/batch_drop.rs index f27b05c4..62435460 100644 --- a/async-usercalls/src/batch_drop.rs +++ b/async-usercalls/src/batch_drop.rs @@ -6,8 +6,8 @@ use std::mem; use std::os::fortanix_sgx::usercalls::alloc::{User, UserSafe}; use std::os::fortanix_sgx::usercalls::raw::UsercallNrs; -pub trait BatchDropable: private::BatchDropable {} -impl BatchDropable for T {} +pub trait BatchDroppable: private::BatchDroppable {} +impl BatchDroppable for T {} /// Drop the given value at some point in the future (no rush!). This is useful /// for freeing userspace memory when we don't particularly care about when the @@ -20,7 +20,7 @@ impl BatchDropable for T {} /// usercall queue is empty we still need to exit the enclave to signal the /// userspace that the queue is not empty anymore. The batch send would send /// multiple usercalls and notify the userspace at most once. 
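///
/// A minimal usage sketch (assuming a `User<[u8]>` allocation is already in
/// hand):
///
/// ```no_run
/// use std::os::fortanix_sgx::usercalls::alloc::User;
///
/// let buf = User::<[u8]>::uninitialized(1024);
/// // ... use `buf` ...
/// batch_drop(buf); // queued; freed in userspace at some later point
/// ```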
-pub fn batch_drop(t: T) { +pub fn batch_drop(t: T) { t.batch_drop(); } @@ -85,11 +85,11 @@ mod private { static PROVIDER: RefCell = RefCell::new(BatchDropProvider::new()); } - pub trait BatchDropable { + pub trait BatchDroppable { fn batch_drop(self); } - impl BatchDropable for User { + impl BatchDroppable for User { fn batch_drop(self) { PROVIDER.with(|p| p.borrow_mut().free(self)); } From de6b5f32761e391f03153968517e579d571eada7 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Wed, 27 Jan 2021 15:32:13 -0800 Subject: [PATCH 10/22] Reorganize tests, address review comments --- async-usercalls/rustfmt.toml | 1 - async-usercalls/src/io_bufs.rs | 65 +++++ async-usercalls/src/lib.rs | 222 ++++++++++++++-- async-usercalls/src/queues.rs | 12 +- async-usercalls/src/raw.rs | 89 +++++++ async-usercalls/src/test_support.rs | 47 ++++ async-usercalls/src/tests.rs | 375 ---------------------------- 7 files changed, 412 insertions(+), 399 deletions(-) delete mode 100644 async-usercalls/rustfmt.toml create mode 100644 async-usercalls/src/test_support.rs delete mode 100644 async-usercalls/src/tests.rs diff --git a/async-usercalls/rustfmt.toml b/async-usercalls/rustfmt.toml deleted file mode 100644 index 75306517..00000000 --- a/async-usercalls/rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -max_width = 120 diff --git a/async-usercalls/src/io_bufs.rs b/async-usercalls/src/io_bufs.rs index 825a7a8a..a8ede0de 100644 --- a/async-usercalls/src/io_bufs.rs +++ b/async-usercalls/src/io_bufs.rs @@ -257,3 +257,68 @@ impl ReadBuffer { self.userbuf } } + +#[cfg(test)] +mod tests { + use super::*; + use std::os::fortanix_sgx::usercalls::alloc::User; + + #[test] + fn write_buffer_basic() { + const LENGTH: usize = 1024; + let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); + + let buf = vec![0u8; LENGTH]; + assert_eq!(write_buffer.write(&buf), LENGTH); + assert_eq!(write_buffer.write(&buf), 0); + + let chunk = write_buffer.consumable_chunk().unwrap(); + write_buffer.consume(chunk, 200); + assert_eq!(write_buffer.write(&buf), 200); + assert_eq!(write_buffer.write(&buf), 0); + } + + #[test] + #[should_panic] + fn call_consumable_chunk_twice() { + const LENGTH: usize = 1024; + let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); + + let buf = vec![0u8; LENGTH]; + assert_eq!(write_buffer.write(&buf), LENGTH); + assert_eq!(write_buffer.write(&buf), 0); + + let chunk1 = write_buffer.consumable_chunk().unwrap(); + let _ = write_buffer.consumable_chunk().unwrap(); + drop(chunk1); + } + + #[test] + #[should_panic] + fn consume_wrong_buf() { + const LENGTH: usize = 1024; + let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); + + let buf = vec![0u8; LENGTH]; + assert_eq!(write_buffer.write(&buf), LENGTH); + assert_eq!(write_buffer.write(&buf), 0); + + let unrelated_buf: UserBuf = User::<[u8]>::uninitialized(512).into(); + write_buffer.consume(unrelated_buf, 100); + } + + #[test] + fn read_buffer_basic() { + let mut buf = User::<[u8]>::uninitialized(64); + const DATA: &'static [u8] = b"hello"; + buf[0..DATA.len()].copy_from_enclave(DATA); + + let mut read_buffer = ReadBuffer::new(buf, DATA.len()); + assert_eq!(read_buffer.len(), DATA.len()); + assert_eq!(read_buffer.remaining_bytes(), DATA.len()); + let mut buf = [0u8; 8]; + assert_eq!(read_buffer.read(&mut buf), DATA.len()); + assert_eq!(read_buffer.remaining_bytes(), 0); + assert_eq!(&buf, b"hello\0\0\0"); + } +} diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs index ce58b8ad..eccc8f88 
100644 --- a/async-usercalls/src/lib.rs +++ b/async-usercalls/src/lib.rs @@ -6,7 +6,6 @@ use crossbeam_channel as mpmc; use ipc_queue::Identified; use std::collections::HashMap; -use std::panic; use std::sync::Mutex; use std::time::Duration; @@ -20,7 +19,7 @@ mod provider_core; mod queues; mod raw; #[cfg(test)] -mod tests; +mod test_support; pub use self::batch_drop::batch_drop; pub use self::callback::CbFn; @@ -127,6 +126,8 @@ pub struct CallbackHandler { } impl CallbackHandler { + const RECV_BATCH_SIZE: usize = 1024; + // Returns an object that can be used to interrupt a blocked `self.poll()`. pub fn waker(&self) -> CallbackHandlerWaker { self.waker.clone() @@ -161,10 +162,7 @@ impl CallbackHandler { /// This can be interrupted using `CallbackHandlerWaker::wake()`. pub fn poll(&self, timeout: Option) -> usize { // 1. wait for returns - let mut returns = [Identified { - id: 0, - data: Return(0, 0), - }; 1024]; + let mut returns = [Identified::default(); Self::RECV_BATCH_SIZE]; let returns = match self.recv_returns(timeout, &mut returns) { 0 => return 0, n => &returns[..n], @@ -190,19 +188,211 @@ impl CallbackHandler { let mut count = 0; for (ret, cb) in ret_callbacks { if let Some(cb) = cb { - let _r = panic::catch_unwind(panic::AssertUnwindSafe(move || { - cb.call(ret.data); - })); + cb.call(ret.data); count += 1; - // if let Err(e) = _r { - // let msg = e - // .downcast_ref::() - // .map(String::as_str) - // .or_else(|| e.downcast_ref::<&str>().map(|&s| s)); - // println!("callback paniced: {:?}", msg); - // } } } count } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::hacks::MakeSend; + use crate::test_support::*; + use crossbeam_channel as mpmc; + use std::io; + use std::net::{TcpListener, TcpStream}; + use std::os::fortanix_sgx::io::AsRawFd; + use std::os::fortanix_sgx::usercalls::alloc::User; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Arc; + use std::thread; + use std::time::Duration; + + #[test] + fn cancel_accept() { + let provider = AutoPollingProvider::new(); + let port = 6688; + let addr = format!("0.0.0.0:{}", port); + let (tx, rx) = mpmc::bounded(1); + provider.bind_stream(&addr, move |res| { + tx.send(res).unwrap(); + }); + let bind_res = rx.recv().unwrap(); + let listener = bind_res.unwrap(); + let fd = listener.as_raw_fd(); + let accept_count = Arc::new(AtomicUsize::new(0)); + let accept_count1 = Arc::clone(&accept_count); + let (tx, rx) = mpmc::bounded(1); + let accept = provider.accept_stream(fd, move |res| { + if let Ok(_) = res { + accept_count1.fetch_add(1, Ordering::Relaxed); + } + tx.send(()).unwrap(); + }); + accept.cancel(); + thread::sleep(Duration::from_millis(10)); + let _ = TcpStream::connect(&addr); + let _ = rx.recv(); + assert_eq!(accept_count.load(Ordering::Relaxed), 0); + } + + #[test] + fn connect() { + let listener = TcpListener::bind("0.0.0.0:0").unwrap(); + let addr = listener.local_addr().unwrap().to_string(); + let provider = AutoPollingProvider::new(); + let (tx, rx) = mpmc::bounded(1); + provider.connect_stream(&addr, move |res| { + tx.send(res).unwrap(); + }); + let res = rx.recv().unwrap(); + assert!(res.is_ok()); + } + + #[test] + fn safe_alloc_free() { + let provider = AutoPollingProvider::new(); + + const LEN: usize = 64 * 1024; + let (tx, rx) = mpmc::bounded(1); + provider.alloc_slice::(LEN, move |res| { + let buf = res.expect("failed to allocate memory"); + tx.send(MakeSend::new(buf)).unwrap(); + }); + let user_buf = rx.recv().unwrap().into_inner(); + assert_eq!(user_buf.len(), LEN); + + let (tx, rx) = 
mpmc::bounded(1); + let cb = move || { + tx.send(()).unwrap(); + }; + provider.free(user_buf, Some(cb)); + rx.recv().unwrap(); + } + + #[test] + fn callback_handler_waker() { + let (_provider, handler) = AsyncUsercallProvider::new(); + let waker = handler.waker(); + let (tx, rx) = mpmc::bounded(1); + let h = thread::spawn(move || { + let n1 = handler.poll(None); + tx.send(()).unwrap(); + let n2 = handler.poll(Some(Duration::from_secs(3))); + tx.send(()).unwrap(); + n1 + n2 + }); + for _ in 0..2 { + waker.wake(); + rx.recv().unwrap(); + } + assert_eq!(h.join().unwrap(), 0); + } + + #[test] + #[ignore] + fn echo() { + println!(); + let provider = Arc::new(AutoPollingProvider::new()); + const ADDR: &'static str = "0.0.0.0:7799"; + let (tx, rx) = mpmc::bounded(1); + provider.bind_stream(ADDR, move |res| { + tx.send(res).unwrap(); + }); + let bind_res = rx.recv().unwrap(); + let listener = bind_res.unwrap(); + println!("bind done: {:?}", listener); + let fd = listener.as_raw_fd(); + let cb = KeepAccepting { + listener, + provider: Arc::clone(&provider), + }; + provider.accept_stream(fd, cb); + thread::sleep(Duration::from_secs(60)); + } + + struct KeepAccepting { + listener: TcpListener, + provider: Arc, + } + + impl FnOnce<(io::Result,)> for KeepAccepting { + type Output = (); + + extern "rust-call" fn call_once(self, args: (io::Result,)) -> Self::Output { + let res = args.0; + println!("accept result: {:?}", res); + if let Ok(stream) = res { + let fd = stream.as_raw_fd(); + let cb = Echo { + stream, + read: true, + provider: self.provider.clone(), + }; + self.provider + .read(fd, User::<[u8]>::uninitialized(Echo::READ_BUF_SIZE), cb); + } + let provider = Arc::clone(&self.provider); + provider.accept_stream(self.listener.as_raw_fd(), self); + } + } + + struct Echo { + stream: TcpStream, + read: bool, + provider: Arc, + } + + impl Echo { + const READ_BUF_SIZE: usize = 1024; + + fn close(self) { + let fd = self.stream.as_raw_fd(); + println!("connection closed, fd = {}", fd); + self.provider.close(fd, None::>); + } + } + + // read callback + impl FnOnce<(io::Result, User<[u8]>)> for Echo { + type Output = (); + + extern "rust-call" fn call_once(mut self, args: (io::Result, User<[u8]>)) -> Self::Output { + let (res, user) = args; + assert!(self.read); + match res { + Ok(len) if len > 0 => { + self.read = false; + let provider = Arc::clone(&self.provider); + provider.write(self.stream.as_raw_fd(), (user, 0..len).into(), self); + } + _ => self.close(), + } + } + } + + // write callback + impl FnOnce<(io::Result, UserBuf)> for Echo { + type Output = (); + + extern "rust-call" fn call_once(mut self, args: (io::Result, UserBuf)) -> Self::Output { + let (res, _) = args; + assert!(!self.read); + match res { + Ok(len) if len > 0 => { + self.read = true; + let provider = Arc::clone(&self.provider); + provider.read( + self.stream.as_raw_fd(), + User::<[u8]>::uninitialized(Echo::READ_BUF_SIZE), + self, + ); + } + _ => self.close(), + } + } + } +} diff --git a/async-usercalls/src/queues.rs b/async-usercalls/src/queues.rs index 75fbf2eb..fc7bbd07 100644 --- a/async-usercalls/src/queues.rs +++ b/async-usercalls/src/queues.rs @@ -80,7 +80,7 @@ struct ReturnHandler { } impl ReturnHandler { - const N: usize = 1024; + const RECV_BATCH_SIZE: usize = 1024; fn send(&self, returns: &[Identified]) { // This should hold the lock only for a short amount of time @@ -90,6 +90,8 @@ impl ReturnHandler { let provider_map = self.provider_map.lock().unwrap(); for ret in returns { let provider_id = (ret.id >> 32) as u32; + // 
NOTE: some providers might decide not to receive results of usercalls they send + // because the results are not interesting, e.g. BatchDropProvider. if let Some(sender) = provider_map.get(provider_id).and_then(|entry| entry.as_ref()) { let _ = sender.send(*ret); } @@ -97,18 +99,14 @@ impl ReturnHandler { } fn run(self) { - const DEFAULT_RETURN: Identified = Identified { - id: 0, - data: Return(0, 0), - }; + let mut returns = [Identified::default(); Self::RECV_BATCH_SIZE]; loop { - let mut returns = [DEFAULT_RETURN; Self::N]; let first = match self.return_queue_rx.recv() { Ok(ret) => ret, Err(RecvError::Closed) => break, }; let mut count = 0; - for ret in iter::once(first).chain(self.return_queue_rx.try_iter().take(Self::N - 1)) { + for ret in iter::once(first).chain(self.return_queue_rx.try_iter().take(Self::RECV_BATCH_SIZE - 1)) { assert!(ret.id != 0); returns[count] = ret; count += 1; diff --git a/async-usercalls/src/raw.rs b/async-usercalls/src/raw.rs index fb2d4fac..d516bc69 100644 --- a/async-usercalls/src/raw.rs +++ b/async-usercalls/src/raw.rs @@ -153,3 +153,92 @@ impl RawApi for AsyncUsercallProvider { self.send_usercall(u, callback.map(|cb| Callback::Free(cb))); } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_support::*; + use crossbeam_channel as mpmc; + use std::io; + use std::sync::atomic::{AtomicPtr, Ordering}; + use std::sync::Arc; + use std::thread; + use std::time::{Duration, UNIX_EPOCH}; + + #[test] + fn get_time_async_raw() { + fn run(tid: u32, provider: AutoPollingProvider) -> (u32, u32, Duration) { + let pid = provider.provider_id(); + const N: usize = 500; + let (tx, rx) = mpmc::bounded(N); + for _ in 0..N { + let tx = tx.clone(); + let cb = move |d| { + let system_time = UNIX_EPOCH + Duration::from_nanos(d); + tx.send(system_time).unwrap(); + }; + unsafe { + provider.raw_insecure_time(Some(cb.into())); + } + } + let mut all = Vec::with_capacity(N); + for _ in 0..N { + all.push(rx.recv().unwrap()); + } + + assert_eq!(all.len(), N); + // The results are returned in arbitrary order + all.sort(); + let t0 = *all.first().unwrap(); + let tn = *all.last().unwrap(); + let total = tn.duration_since(t0).unwrap(); + (tid, pid, total / N as u32) + } + + println!(); + const THREADS: usize = 4; + let mut providers = Vec::with_capacity(THREADS); + for _ in 0..THREADS { + providers.push(AutoPollingProvider::new()); + } + let mut handles = Vec::with_capacity(THREADS); + for (i, provider) in providers.into_iter().enumerate() { + handles.push(thread::spawn(move || run(i as u32, provider))); + } + for h in handles { + let res = h.join().unwrap(); + println!("[{}/{}] (Tn - T0) / N = {:?}", res.0, res.1, res.2); + } + } + + #[test] + fn raw_alloc_free() { + let provider = AutoPollingProvider::new(); + let ptr: Arc> = Arc::new(AtomicPtr::new(0 as _)); + let ptr2 = Arc::clone(&ptr); + const SIZE: usize = 1024; + const ALIGN: usize = 8; + + let (tx, rx) = mpmc::bounded(1); + let cb_alloc = move |p: io::Result<*mut u8>| { + let p = p.unwrap(); + ptr2.store(p, Ordering::Relaxed); + tx.send(()).unwrap(); + }; + unsafe { + provider.raw_alloc(SIZE, ALIGN, Some(cb_alloc.into())); + } + rx.recv().unwrap(); + let p = ptr.load(Ordering::Relaxed); + assert!(!p.is_null()); + + let (tx, rx) = mpmc::bounded(1); + let cb_free = move |()| { + tx.send(()).unwrap(); + }; + unsafe { + provider.raw_free(p, SIZE, ALIGN, Some(cb_free.into())); + } + rx.recv().unwrap(); + } +} diff --git a/async-usercalls/src/test_support.rs b/async-usercalls/src/test_support.rs new file mode 100644 index 
00000000..fa3b75bd --- /dev/null +++ b/async-usercalls/src/test_support.rs @@ -0,0 +1,47 @@ +use crate::AsyncUsercallProvider; +use std::ops::Deref; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread; + +pub(crate) struct AutoPollingProvider { + provider: AsyncUsercallProvider, + shutdown: Arc, + join_handle: Option>, +} + +impl AutoPollingProvider { + pub fn new() -> Self { + let (provider, handler) = AsyncUsercallProvider::new(); + let shutdown = Arc::new(AtomicBool::new(false)); + let shutdown1 = shutdown.clone(); + let join_handle = Some(thread::spawn(move || loop { + handler.poll(None); + if shutdown1.load(Ordering::Relaxed) { + break; + } + })); + Self { + provider, + shutdown, + join_handle, + } + } +} + +impl Deref for AutoPollingProvider { + type Target = AsyncUsercallProvider; + + fn deref(&self) -> &Self::Target { + &self.provider + } +} + +impl Drop for AutoPollingProvider { + fn drop(&mut self) { + self.shutdown.store(true, Ordering::Relaxed); + // send a usercall to ensure thread wakes up + self.provider.insecure_time(|_| {}); + self.join_handle.take().unwrap().join().unwrap(); + } +} diff --git a/async-usercalls/src/tests.rs b/async-usercalls/src/tests.rs deleted file mode 100644 index ff838c48..00000000 --- a/async-usercalls/src/tests.rs +++ /dev/null @@ -1,375 +0,0 @@ -use super::*; -use crate::hacks::MakeSend; -use crossbeam_channel as mpmc; -use std::io; -use std::net::{TcpListener, TcpStream}; -use std::ops::Deref; -use std::os::fortanix_sgx::io::AsRawFd; -use std::os::fortanix_sgx::usercalls::alloc::User; -use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering}; -use std::sync::Arc; -use std::thread; -use std::time::{Duration, UNIX_EPOCH}; - -struct AutoPollingProvider { - provider: AsyncUsercallProvider, - shutdown: Arc, - join_handle: Option>, -} - -impl AutoPollingProvider { - fn new() -> Self { - let (provider, handler) = AsyncUsercallProvider::new(); - let shutdown = Arc::new(AtomicBool::new(false)); - let shutdown1 = shutdown.clone(); - let join_handle = Some(thread::spawn(move || loop { - handler.poll(None); - if shutdown1.load(Ordering::Relaxed) { - break; - } - })); - Self { - provider, - shutdown, - join_handle, - } - } -} - -impl Deref for AutoPollingProvider { - type Target = AsyncUsercallProvider; - - fn deref(&self) -> &Self::Target { - &self.provider - } -} - -impl Drop for AutoPollingProvider { - fn drop(&mut self) { - self.shutdown.store(true, Ordering::Relaxed); - // send a usercall to ensure thread wakes up - self.provider.insecure_time(|_| {}); - self.join_handle.take().unwrap().join().unwrap(); - } -} - -#[test] -fn get_time_async_raw() { - fn run(tid: u32, provider: AutoPollingProvider) -> (u32, u32, Duration) { - let pid = provider.provider_id(); - const N: usize = 500; - let (tx, rx) = mpmc::bounded(N); - for _ in 0..N { - let tx = tx.clone(); - let cb = move |d| { - let system_time = UNIX_EPOCH + Duration::from_nanos(d); - tx.send(system_time).unwrap(); - }; - unsafe { - provider.raw_insecure_time(Some(cb.into())); - } - } - let mut all = Vec::with_capacity(N); - for _ in 0..N { - all.push(rx.recv().unwrap()); - } - - assert_eq!(all.len(), N); - // The results are returned in arbitrary order - all.sort(); - let t0 = *all.first().unwrap(); - let tn = *all.last().unwrap(); - let total = tn.duration_since(t0).unwrap(); - (tid, pid, total / N as u32) - } - - println!(); - const THREADS: usize = 4; - let mut providers = Vec::with_capacity(THREADS); - for _ in 0..THREADS { - 
providers.push(AutoPollingProvider::new()); - } - let mut handles = Vec::with_capacity(THREADS); - for (i, provider) in providers.into_iter().enumerate() { - handles.push(thread::spawn(move || run(i as u32, provider))); - } - for h in handles { - let res = h.join().unwrap(); - println!("[{}/{}] (Tn - T0) / N = {:?}", res.0, res.1, res.2); - } -} - -#[test] -fn raw_alloc_free() { - let provider = AutoPollingProvider::new(); - let ptr: Arc> = Arc::new(AtomicPtr::new(0 as _)); - let ptr2 = Arc::clone(&ptr); - const SIZE: usize = 1024; - const ALIGN: usize = 8; - - let (tx, rx) = mpmc::bounded(1); - let cb_alloc = move |p: io::Result<*mut u8>| { - let p = p.unwrap(); - ptr2.store(p, Ordering::Relaxed); - tx.send(()).unwrap(); - }; - unsafe { - provider.raw_alloc(SIZE, ALIGN, Some(cb_alloc.into())); - } - rx.recv().unwrap(); - let p = ptr.load(Ordering::Relaxed); - assert!(!p.is_null()); - - let (tx, rx) = mpmc::bounded(1); - let cb_free = move |()| { - tx.send(()).unwrap(); - }; - unsafe { - provider.raw_free(p, SIZE, ALIGN, Some(cb_free.into())); - } - rx.recv().unwrap(); -} - -#[test] -fn cancel_accept() { - let provider = AutoPollingProvider::new(); - let port = 6688; - let addr = format!("0.0.0.0:{}", port); - let (tx, rx) = mpmc::bounded(1); - provider.bind_stream(&addr, move |res| { - tx.send(res).unwrap(); - }); - let bind_res = rx.recv().unwrap(); - let listener = bind_res.unwrap(); - let fd = listener.as_raw_fd(); - let accept_count = Arc::new(AtomicUsize::new(0)); - let accept_count1 = Arc::clone(&accept_count); - let (tx, rx) = mpmc::bounded(1); - let accept = provider.accept_stream(fd, move |res| { - if let Ok(_) = res { - accept_count1.fetch_add(1, Ordering::Relaxed); - } - tx.send(()).unwrap(); - }); - accept.cancel(); - thread::sleep(Duration::from_millis(10)); - let _ = TcpStream::connect(&addr); - let _ = rx.recv(); - assert_eq!(accept_count.load(Ordering::Relaxed), 0); -} - -#[test] -fn connect() { - let listener = TcpListener::bind("0.0.0.0:0").unwrap(); - let addr = listener.local_addr().unwrap().to_string(); - let provider = AutoPollingProvider::new(); - let (tx, rx) = mpmc::bounded(1); - provider.connect_stream(&addr, move |res| { - tx.send(res).unwrap(); - }); - let res = rx.recv().unwrap(); - assert!(res.is_ok()); -} - -#[test] -fn safe_alloc_free() { - let provider = AutoPollingProvider::new(); - - const LEN: usize = 64 * 1024; - let (tx, rx) = mpmc::bounded(1); - provider.alloc_slice::(LEN, move |res| { - let buf = res.expect("failed to allocate memory"); - tx.send(MakeSend::new(buf)).unwrap(); - }); - let user_buf = rx.recv().unwrap().into_inner(); - assert_eq!(user_buf.len(), LEN); - - let (tx, rx) = mpmc::bounded(1); - let cb = move || { - tx.send(()).unwrap(); - }; - provider.free(user_buf, Some(cb)); - rx.recv().unwrap(); -} - -#[test] -fn write_buffer_basic() { - const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); - - let buf = vec![0u8; LENGTH]; - assert_eq!(write_buffer.write(&buf), LENGTH); - assert_eq!(write_buffer.write(&buf), 0); - - let chunk = write_buffer.consumable_chunk().unwrap(); - write_buffer.consume(chunk, 200); - assert_eq!(write_buffer.write(&buf), 200); - assert_eq!(write_buffer.write(&buf), 0); -} - -#[test] -#[should_panic] -fn call_consumable_chunk_twice() { - const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); - - let buf = vec![0u8; LENGTH]; - assert_eq!(write_buffer.write(&buf), LENGTH); - assert_eq!(write_buffer.write(&buf), 0); - 
- let chunk1 = write_buffer.consumable_chunk().unwrap(); - let _ = write_buffer.consumable_chunk().unwrap(); - drop(chunk1); -} - -#[test] -#[should_panic] -fn consume_wrong_buf() { - const LENGTH: usize = 1024; - let mut write_buffer = WriteBuffer::new(User::<[u8]>::uninitialized(1024)); - - let buf = vec![0u8; LENGTH]; - assert_eq!(write_buffer.write(&buf), LENGTH); - assert_eq!(write_buffer.write(&buf), 0); - - let unrelated_buf: UserBuf = User::<[u8]>::uninitialized(512).into(); - write_buffer.consume(unrelated_buf, 100); -} - -#[test] -fn read_buffer_basic() { - let mut buf = User::<[u8]>::uninitialized(64); - const DATA: &'static [u8] = b"hello"; - buf[0..DATA.len()].copy_from_enclave(DATA); - - let mut read_buffer = ReadBuffer::new(buf, DATA.len()); - assert_eq!(read_buffer.len(), DATA.len()); - assert_eq!(read_buffer.remaining_bytes(), DATA.len()); - let mut buf = [0u8; 8]; - assert_eq!(read_buffer.read(&mut buf), DATA.len()); - assert_eq!(read_buffer.remaining_bytes(), 0); - assert_eq!(&buf, b"hello\0\0\0"); -} - -#[test] -fn callback_handler_waker() { - let (_provider, handler) = AsyncUsercallProvider::new(); - let waker = handler.waker(); - let (tx, rx) = mpmc::bounded(1); - let h = thread::spawn(move || { - let n1 = handler.poll(None); - tx.send(()).unwrap(); - let n2 = handler.poll(Some(Duration::from_secs(3))); - tx.send(()).unwrap(); - n1 + n2 - }); - for _ in 0..2 { - waker.wake(); - rx.recv().unwrap(); - } - assert_eq!(h.join().unwrap(), 0); -} - -#[test] -#[ignore] -fn echo() { - println!(); - let provider = Arc::new(AutoPollingProvider::new()); - const ADDR: &'static str = "0.0.0.0:7799"; - let (tx, rx) = mpmc::bounded(1); - provider.bind_stream(ADDR, move |res| { - tx.send(res).unwrap(); - }); - let bind_res = rx.recv().unwrap(); - let listener = bind_res.unwrap(); - println!("bind done: {:?}", listener); - let fd = listener.as_raw_fd(); - let cb = KeepAccepting { - listener, - provider: Arc::clone(&provider), - }; - provider.accept_stream(fd, cb); - thread::sleep(Duration::from_secs(60)); -} - -struct KeepAccepting { - listener: TcpListener, - provider: Arc, -} - -impl FnOnce<(io::Result,)> for KeepAccepting { - type Output = (); - - extern "rust-call" fn call_once(self, args: (io::Result,)) -> Self::Output { - let res = args.0; - println!("accept result: {:?}", res); - if let Ok(stream) = res { - let fd = stream.as_raw_fd(); - let cb = Echo { - stream, - read: true, - provider: self.provider.clone(), - }; - self.provider - .read(fd, User::<[u8]>::uninitialized(Echo::READ_BUF_SIZE), cb); - } - let provider = Arc::clone(&self.provider); - provider.accept_stream(self.listener.as_raw_fd(), self); - } -} - -struct Echo { - stream: TcpStream, - read: bool, - provider: Arc, -} - -impl Echo { - const READ_BUF_SIZE: usize = 1024; - - fn close(self) { - let fd = self.stream.as_raw_fd(); - println!("connection closed, fd = {}", fd); - self.provider.close(fd, None::>); - } -} - -// read callback -impl FnOnce<(io::Result, User<[u8]>)> for Echo { - type Output = (); - - extern "rust-call" fn call_once(mut self, args: (io::Result, User<[u8]>)) -> Self::Output { - let (res, user) = args; - assert!(self.read); - match res { - Ok(len) if len > 0 => { - self.read = false; - let provider = Arc::clone(&self.provider); - provider.write(self.stream.as_raw_fd(), (user, 0..len).into(), self); - } - _ => self.close(), - } - } -} - -// write callback -impl FnOnce<(io::Result, UserBuf)> for Echo { - type Output = (); - - extern "rust-call" fn call_once(mut self, args: (io::Result, UserBuf)) -> 
Self::Output { - let (res, _) = args; - assert!(!self.read); - match res { - Ok(len) if len > 0 => { - self.read = true; - let provider = Arc::clone(&self.provider); - provider.read( - self.stream.as_raw_fd(), - User::<[u8]>::uninitialized(Echo::READ_BUF_SIZE), - self, - ); - } - _ => self.close(), - } - } -} From f5f219934a7c41220d3147b0d308d4fb786abc09 Mon Sep 17 00:00:00 2001 From: Mohsen Zohrevandi Date: Fri, 26 Feb 2021 17:15:36 -0800 Subject: [PATCH 11/22] Use invoke_with_usercalls macro to define Callback enum --- async-usercalls/src/callback.rs | 102 ++++++++++++-------------------- async-usercalls/src/raw.rs | 20 +++---- 2 files changed, 49 insertions(+), 73 deletions(-) diff --git a/async-usercalls/src/callback.rs b/async-usercalls/src/callback.rs index 369ca2b8..7586ebb0 100644 --- a/async-usercalls/src/callback.rs +++ b/async-usercalls/src/callback.rs @@ -1,6 +1,6 @@ use crate::duplicated::{FromSgxResult, ReturnValue}; use crate::hacks::Return; -use fortanix_sgx_abi::{Fd, Result as SxgResult}; +use fortanix_sgx_abi::{invoke_with_usercalls, Fd, Result}; use std::io; pub struct CbFn(Box); @@ -20,70 +20,46 @@ where } } -pub(crate) enum Callback { - Read(CbFn>), - Write(CbFn>), - Flush(CbFn>), - Close(CbFn<()>), - BindStream(CbFn>), - AcceptStream(CbFn>), - ConnectStream(CbFn>), - InsecureTime(CbFn), - Alloc(CbFn>), - Free(CbFn<()>), +macro_rules! cbfn_type { + ( ) => { CbFn<()> }; + ( -> ! ) => { () }; + ( -> u64 ) => { CbFn }; + ( -> (Result, usize) ) => { CbFn> }; + ( -> (Result, u64) ) => { CbFn> }; + ( -> (Result, Fd) ) => { CbFn> }; + ( -> (Result, *mut u8) ) => { CbFn> }; + ( -> Result ) => { CbFn> }; } -impl Callback { - pub(crate) fn call(self, ret: Return) { - use Callback::*; - match self { - Read(cb) => { - let x: (SxgResult, usize) = ReturnValue::from_registers("read", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - Write(cb) => { - let x: (SxgResult, usize) = ReturnValue::from_registers("write", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - Flush(cb) => { - let x: SxgResult = ReturnValue::from_registers("flush", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - Close(cb) => { - assert_eq!((ret.0, ret.1), (0, 0)); - cb.call(()); - } - BindStream(cb) => { - let x: (SxgResult, Fd) = ReturnValue::from_registers("bind_stream", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - AcceptStream(cb) => { - let x: (SxgResult, Fd) = ReturnValue::from_registers("accept_stream", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - ConnectStream(cb) => { - let x: (SxgResult, Fd) = ReturnValue::from_registers("connect_stream", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - InsecureTime(cb) => { - let x: u64 = ReturnValue::from_registers("insecure_time", (ret.0, ret.1)); - cb.call(x); - } - Alloc(cb) => { - let x: (SxgResult, *mut u8) = ReturnValue::from_registers("alloc", (ret.0, ret.1)); - let x = x.from_sgx_result(); - cb.call(x); - } - Free(cb) => { - assert_eq!((ret.0, ret.1), (0, 0)); - cb.call(()); +macro_rules! call_cbfn { + ( $cb:ident, $rv:expr, ) => { let x: () = $rv; $cb.call(x); }; + ( $cb:ident, $rv:expr, -> ! ) => { let _: ! = $rv; }; + ( $cb:ident, $rv:expr, -> u64 ) => { let x: u64 = $rv; $cb.call(x); }; + ( $cb:ident, $rv:expr, -> $t:ty ) => { let x: $t = $rv; $cb.call(x.from_sgx_result()); }; +} + +macro_rules! 
define_callback { + ($(fn $name:ident($($n:ident: $t:ty),*) $(-> $r:tt)*; )*) => { + #[allow(unused)] + #[allow(non_camel_case_types)] + pub(crate) enum Callback { + $( $name(cbfn_type! { $(-> $r)* }), )* + } + + impl Callback { + pub(crate) fn call(self, ret: Return) { + match self {$( + Callback::$name(_cb) => { + call_cbfn!( + _cb, + ReturnValue::from_registers(stringify!($name), (ret.0, ret.1)), + $(-> $r)* + ); + } + )*} } } - } + }; } + +invoke_with_usercalls!(define_callback); diff --git a/async-usercalls/src/raw.rs b/async-usercalls/src/raw.rs index d516bc69..7edaa7eb 100644 --- a/async-usercalls/src/raw.rs +++ b/async-usercalls/src/raw.rs @@ -68,7 +68,7 @@ impl RawApi for AsyncUsercallProvider { callback: Option>>, ) -> CancelHandle { let u = Usercall(UsercallNrs::read as _, fd as _, buf as _, len as _, 0); - self.send_usercall(u, callback.map(|cb| Callback::Read(cb))) + self.send_usercall(u, callback.map(|cb| Callback::read(cb))) } unsafe fn raw_write( @@ -79,17 +79,17 @@ impl RawApi for AsyncUsercallProvider { callback: Option>>, ) -> CancelHandle { let u = Usercall(UsercallNrs::write as _, fd as _, buf as _, len as _, 0); - self.send_usercall(u, callback.map(|cb| Callback::Write(cb))) + self.send_usercall(u, callback.map(|cb| Callback::write(cb))) } unsafe fn raw_flush(&self, fd: Fd, callback: Option>>) { let u = Usercall(UsercallNrs::flush as _, fd as _, 0, 0, 0); - self.send_usercall(u, callback.map(|cb| Callback::Flush(cb))); + self.send_usercall(u, callback.map(|cb| Callback::flush(cb))); } unsafe fn raw_close(&self, fd: Fd, callback: Option>) { let u = Usercall(UsercallNrs::close as _, fd as _, 0, 0, 0); - self.send_usercall(u, callback.map(|cb| Callback::Close(cb))); + self.send_usercall(u, callback.map(|cb| Callback::close(cb))); } unsafe fn raw_bind_stream( @@ -100,7 +100,7 @@ impl RawApi for AsyncUsercallProvider { callback: Option>>, ) { let u = Usercall(UsercallNrs::bind_stream as _, addr as _, len as _, local_addr as _, 0); - self.send_usercall(u, callback.map(|cb| Callback::BindStream(cb))); + self.send_usercall(u, callback.map(|cb| Callback::bind_stream(cb))); } unsafe fn raw_accept_stream( @@ -117,7 +117,7 @@ impl RawApi for AsyncUsercallProvider { peer_addr as _, 0, ); - self.send_usercall(u, callback.map(|cb| Callback::AcceptStream(cb))) + self.send_usercall(u, callback.map(|cb| Callback::accept_stream(cb))) } unsafe fn raw_connect_stream( @@ -135,22 +135,22 @@ impl RawApi for AsyncUsercallProvider { local_addr as _, peer_addr as _, ); - self.send_usercall(u, callback.map(|cb| Callback::ConnectStream(cb))) + self.send_usercall(u, callback.map(|cb| Callback::connect_stream(cb))) } unsafe fn raw_insecure_time(&self, callback: Option>) { let u = Usercall(UsercallNrs::insecure_time as _, 0, 0, 0, 0); - self.send_usercall(u, callback.map(|cb| Callback::InsecureTime(cb))); + self.send_usercall(u, callback.map(|cb| Callback::insecure_time(cb))); } unsafe fn raw_alloc(&self, size: usize, alignment: usize, callback: Option>>) { let u = Usercall(UsercallNrs::alloc as _, size as _, alignment as _, 0, 0); - self.send_usercall(u, callback.map(|cb| Callback::Alloc(cb))); + self.send_usercall(u, callback.map(|cb| Callback::alloc(cb))); } unsafe fn raw_free(&self, ptr: *mut u8, size: usize, alignment: usize, callback: Option>) { let u = Usercall(UsercallNrs::free as _, ptr as _, size as _, alignment as _, 0); - self.send_usercall(u, callback.map(|cb| Callback::Free(cb))); + self.send_usercall(u, callback.map(|cb| Callback::free(cb))); } } From 
From 6e5617ee08dbd06bd1f02acbfcb43ab3c5651984 Mon Sep 17 00:00:00 2001
From: Mohsen Zohrevandi
Date: Mon, 29 Mar 2021 14:09:57 -0700
Subject: [PATCH 12/22] Reduce batch size in CallbackHandler

---
 async-usercalls/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/async-usercalls/src/lib.rs b/async-usercalls/src/lib.rs
index eccc8f88..5ac00326 100644
--- a/async-usercalls/src/lib.rs
+++ b/async-usercalls/src/lib.rs
@@ -126,7 +126,7 @@ pub struct CallbackHandler {
 }
 
 impl CallbackHandler {
-    const RECV_BATCH_SIZE: usize = 1024;
+    const RECV_BATCH_SIZE: usize = 128;
 
     // Returns an object that can be used to interrupt a blocked `self.poll()`.
     pub fn waker(&self) -> CallbackHandlerWaker {

From 4cf2a8e12912bfd5e0ce8ef7fcf8f607110dfda2 Mon Sep 17 00:00:00 2001
From: Raoul Strackx
Date: Mon, 7 Jun 2021 17:25:12 +0200
Subject: [PATCH 13/22] Avoiding use of LLVM reserved register `rbx`

---
 enclave-runner/src/tcs.rs           | 4 +++-
 enclave-runner/src/usercalls/mod.rs | 9 ++++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/enclave-runner/src/tcs.rs b/enclave-runner/src/tcs.rs
index 35c86764..db6e8e1e 100644
--- a/enclave-runner/src/tcs.rs
+++ b/enclave-runner/src/tcs.rs
@@ -184,10 +184,12 @@ pub(crate) fn coenter(
     } else {
         asm!("
             lea 1f(%rip), %rcx // set SGX AEP
+            xchg {0}, %rbx
 1:          enclu
+            xchg %rbx, {0}
         ",
+            inout(reg) tcs.address() => _, // rbx is used internally by LLVM and cannot be used as an operand for inline asm (#84658)
             inout("eax") Enclu::EEnter as u32 => sgx_result,
-            inout("rbx") tcs.address() => _,
             out("rcx") _,
             inout("rdx") p3,
             inout("rdi") p1,
diff --git a/enclave-runner/src/usercalls/mod.rs b/enclave-runner/src/usercalls/mod.rs
index 877019bb..d2ce4de9 100644
--- a/enclave-runner/src/usercalls/mod.rs
+++ b/enclave-runner/src/usercalls/mod.rs
@@ -1317,7 +1317,14 @@ async fn trap_attached_debugger(tcs: usize, debug_buf: *const u8) {
     // Synchronized
     unsafe {
         let old = signal::sigaction(signal::SIGTRAP, &sig_action).unwrap();
-        asm!("int3", in("rbx") tcs, in("r10") debug_buf, options(nomem, nostack, att_syntax));
+        asm!("
+            xchg %rbx, {0}
+            int3
+            xchg {0}, %rbx
+            ",
+            in(reg) tcs, // rbx is used internally by LLVM and cannot be used as an operand for inline asm (#84658)
+            in("r10") debug_buf,
+            options(nomem, nostack, att_syntax));
         signal::sigaction(signal::SIGTRAP, &old).unwrap();
     }
 }

From cf85cb6b03744ac76f4d594817e5d7e5cf095a6b Mon Sep 17 00:00:00 2001
From: Mohsen Zohrevandi
Date: Mon, 18 Oct 2021 18:37:58 -0700
Subject: [PATCH 14/22] Fix race condition when cancel is received before
 usercall

`UsercallEvent::Started` was being sent in `fn handle_usercall`, which is too
late. It needs to be sent before we receive the next usercall from the enclave
so we can maintain the invariant that "we only need to keep track of cancels
received before the actual usercall if the read position has not moved past
the write position when cancel was received."
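The new ordering is easiest to see in the usercall queue reader loop; roughly
(simplified sketch, the exact code is in the diff below):

    while let Ok(usercall) = usercall_queue_rx.recv().await {
        // Register the usercall *before* the next recv() can complete, so a
        // cancel that arrives early always finds a matching Started entry.
        let notifier_rx = if usercall.ignore_cancel() {
            None
        } else {
            let (notifier_tx, notifier_rx) = oneshot::channel();
            usercall_event_tx.send(UsercallEvent::Started(usercall.id, notifier_tx))
                .ok().expect("failed to send usercall event");
            Some(notifier_rx)
        };
        let _ = io_queue_send.send(UsercallSendData::Async(usercall, notifier_rx));
    }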
--- enclave-runner/src/usercalls/mod.rs | 39 +++++++++++++++-------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/enclave-runner/src/usercalls/mod.rs b/enclave-runner/src/usercalls/mod.rs index d2ce4de9..b0870a1b 100644 --- a/enclave-runner/src/usercalls/mod.rs +++ b/enclave-runner/src/usercalls/mod.rs @@ -29,9 +29,7 @@ use libc::*; use nix::sys::signal; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::stream::Stream as TokioStream; -use tokio::sync::broadcast; -use tokio::sync::mpsc as async_mpsc; -use tokio::sync::Semaphore; +use tokio::sync::{broadcast, mpsc as async_mpsc, oneshot, Semaphore}; use fortanix_sgx_abi::*; use ipc_queue::{self, DescriptorGuard, Identified, QueueEvent, WritePosition}; @@ -57,13 +55,13 @@ const CANCEL_QUEUE_SIZE: usize = USERCALL_QUEUE_SIZE * 2; enum UsercallSendData { Sync(ThreadResult, RunningTcs, RefCell<[u8; 1024]>), - Async(Identified), + Async(Identified, Option>), } // This is the same as UsercallSendData except that it can't be Sync(CoResult::Return(...), ...) enum UsercallHandleData { Sync(tcs::Usercall, RunningTcs, RefCell<[u8; 1024]>), - Async(Identified, Option>), + Async(Identified, Option>, Option>), } type EnclaveResult = StdResult<(u64, u64), EnclaveAbort>>; @@ -689,7 +687,7 @@ impl Work { } enum UsercallEvent { - Started(u64, tokio::sync::oneshot::Sender<()>), + Started(u64, oneshot::Sender<()>), Finished(u64), Cancelled(u64, WritePosition), } @@ -780,17 +778,12 @@ impl EnclaveState { mut handle_data: UsercallHandleData, ) { let notifier_rx = match handle_data { - UsercallHandleData::Async(ref usercall, Some(ref usercall_event_tx)) => { - let (notifier_tx, notifier_rx) = tokio::sync::oneshot::channel(); - usercall_event_tx.send(UsercallEvent::Started(usercall.id, notifier_tx)).ok() - .expect("failed to send usercall event"); - Some(notifier_rx) - }, + UsercallHandleData::Async(_, ref mut notifier_rx, _) => notifier_rx.take(), _ => None, }; let (parameters, mode, tcs) = match handle_data { UsercallHandleData::Sync(ref usercall, ref mut tcs, _) => (usercall.parameters(), tcs.mode.into(), Some(tcs)), - UsercallHandleData::Async(ref usercall, _) => (usercall.data.into(), ReturnSource::AsyncUsercall, None), + UsercallHandleData::Async(ref usercall, _, _) => (usercall.data.into(), ReturnSource::AsyncUsercall, None), }; let mut input = IOHandlerInput { enclave: enclave.clone(), tcs, work_sender: &work_sender }; let handler = Handler(&mut input); @@ -824,7 +817,7 @@ impl EnclaveState { entry: CoEntry::Resume(usercall, ret), }).expect("Work sender couldn't send data to receiver"); } - UsercallHandleData::Async(usercall, usercall_event_tx) => { + UsercallHandleData::Async(usercall, _, usercall_event_tx) => { if let Some(usercall_event_tx) = usercall_event_tx { usercall_event_tx.send(UsercallEvent::Finished(usercall.id)).ok() .expect("failed to send usercall event"); @@ -849,7 +842,7 @@ impl EnclaveState { } EnclavePanic::from(debug_buf) } - UsercallHandleData::Async(_, _) => { + UsercallHandleData::Async(_, _, _) => { // TODO: https://github.com/fortanix/rust-sgx/issues/235#issuecomment-641811437 EnclavePanic::DebugStr("async exit with a panic".to_owned()) } @@ -945,13 +938,21 @@ impl EnclaveState { let usercall_queue_monitor = usercall_queue_rx.position_monitor(); + let (usercall_event_tx, mut usercall_event_rx) = async_mpsc::unbounded_channel(); + let usercall_event_tx_clone = usercall_event_tx.clone(); tokio::task::spawn_local(async move { while let Ok(usercall) = usercall_queue_rx.recv().await { - let _ = 
io_queue_send.send(UsercallSendData::Async(usercall));
+            let notifier_rx = if usercall.ignore_cancel() {
+                None
+            } else {
+                let (notifier_tx, notifier_rx) = oneshot::channel();
+                usercall_event_tx_clone.send(UsercallEvent::Started(usercall.id, notifier_tx)).ok().expect("failed to send usercall event");
+                Some(notifier_rx)
+            };
+            let _ = io_queue_send.send(UsercallSendData::Async(usercall, notifier_rx));
         }
     });
 
-    let (usercall_event_tx, mut usercall_event_rx) = async_mpsc::unbounded_channel();
     let usercall_event_tx_clone = usercall_event_tx.clone();
     let usercall_queue_monitor_clone = usercall_queue_monitor.clone();
     tokio::task::spawn_local(async move {
@@ -988,9 +989,9 @@ impl EnclaveState {
                 let enclave_clone = enclave_clone.clone();
                 let tx_return_channel = tx_return_channel.clone();
                 match work {
-                    UsercallSendData::Async(usercall) => {
+                    UsercallSendData::Async(usercall, notifier_rx) => {
                         let usercall_event_tx = if usercall.ignore_cancel() { None } else { Some(usercall_event_tx.clone()) };
-                        let uchd = UsercallHandleData::Async(usercall, usercall_event_tx);
+                        let uchd = UsercallHandleData::Async(usercall, notifier_rx, usercall_event_tx);
                         let fut = Self::handle_usercall(enclave_clone, work_sender.clone(), tx_return_channel, uchd);
                         tokio::task::spawn_local(fut);
                     }

From e1e92d3d7866c611db25c1d98c34834dc9beb373 Mon Sep 17 00:00:00 2001
From: Raoul Strackx
Date: Mon, 9 May 2022 10:02:03 +0200
Subject: [PATCH 15/22] Verify allocation of `FifoDescriptor`

---
 async-usercalls/src/hacks/async_queues.rs | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/async-usercalls/src/hacks/async_queues.rs b/async-usercalls/src/hacks/async_queues.rs
index a325a28f..5e7a9252 100644
--- a/async-usercalls/src/hacks/async_queues.rs
+++ b/async-usercalls/src/hacks/async_queues.rs
@@ -4,6 +4,7 @@ use fortanix_sgx_abi::FifoDescriptor;
 use std::num::NonZeroU64;
 use std::os::fortanix_sgx::usercalls;
 use std::os::fortanix_sgx::usercalls::raw;
+use std::os::fortanix_sgx::usercalls::alloc::{UserSafeSized, User};
 use std::{mem, ptr};
 
 // TODO: remove these once support for cancel queue is added in `std::os::fortanix_sgx`
@@ -26,12 +27,13 @@ pub unsafe fn async_queues(
     )
 }
 
-pub unsafe fn alloc_descriptor<T>() -> *mut FifoDescriptor<T> {
-    usercalls::alloc(
-        mem::size_of::<FifoDescriptor<T>>(),
-        mem::align_of::<FifoDescriptor<T>>(),
-    )
-    .expect("failed to allocate userspace memory") as _
+pub unsafe fn alloc_descriptor<T>() -> *mut FifoDescriptor<T> {
+    #[repr(transparent)]
+    #[derive(Copy, Clone)]
+    struct WrappedFifoDescriptor<T>(FifoDescriptor<T>);
+    unsafe impl<T: UserSafeSized> UserSafeSized for WrappedFifoDescriptor<T> {}
+
+    User::<WrappedFifoDescriptor<T>>::uninitialized().into_raw() as _
 }
 
 pub unsafe fn to_enclave<T>(ptr: *mut FifoDescriptor<T>) -> FifoDescriptor<T> {

From d106b454a922db51fc5211fb8e5e676378b94319 Mon Sep 17 00:00:00 2001
From: Raoul Strackx
Date: Mon, 9 May 2022 10:03:17 +0200
Subject: [PATCH 16/22] Verify `offsets` pointer of fifo queue

---
 ipc-queue/src/fifo.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/ipc-queue/src/fifo.rs b/ipc-queue/src/fifo.rs
index 0a6005a0..6b9cdda8 100644
--- a/ipc-queue/src/fifo.rs
+++ b/ipc-queue/src/fifo.rs
@@ -100,7 +100,7 @@ impl Fifo {
             "Fifo len should be a power of two"
         );
         #[cfg(target_env = "sgx")] {
-            use std::os::fortanix_sgx::usercalls::alloc::User;
+            use std::os::fortanix_sgx::usercalls::alloc::{User, UserRef};
             // `fortanix_sgx_abi::WithId` is not `Copy` because it contains an `AtomicU64`.
// This type has the same memory layout but is `Copy` and can be marked as @@ -118,9 +118,15 @@ impl Fifo { let _: [u8; size_of::>()] = [0u8; size_of::>()]; } + #[repr(transparent)] + #[derive(Copy, Clone)] + struct WrapUsize(usize); + unsafe impl UserSafeSized for WrapUsize{} + // check pointers are outside enclave range, etc. let data = User::<[WithId]>::from_raw_parts(descriptor.data as _, descriptor.len); mem::forget(data); + UserRef::from_ptr(descriptor.offsets as *const WrapUsize); } let data_slice = std::slice::from_raw_parts(descriptor.data, descriptor.len); Self { From 54c4623113b425d8a51488173658a8c8d121bd20 Mon Sep 17 00:00:00 2001 From: Raoul Strackx Date: Mon, 9 May 2022 14:41:57 +0200 Subject: [PATCH 17/22] Revert to older toolchain --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index db315677..2527df3a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ matrix: - libclang-3.8-dev - musl-tools rust: - - nightly + - nightly-2021-01-14 env: - RUST_BACKTRACE=1 LLVM_CONFIG_PATH=llvm-3.8-config before_script: From b730db06cfc29d1c1443f37bc66430bc7f5139d1 Mon Sep 17 00:00:00 2001 From: Raoul Strackx Date: Wed, 1 Jun 2022 13:40:24 +0200 Subject: [PATCH 18/22] Refactor ipc_queue to access userspace through a trait --- ipc-queue/src/fifo.rs | 124 ++++++++++++++++++++++--------- ipc-queue/src/interface_async.rs | 2 + ipc-queue/src/interface_sync.rs | 1 + ipc-queue/src/lib.rs | 38 ++++++++-- 4 files changed, 125 insertions(+), 40 deletions(-) diff --git a/ipc-queue/src/fifo.rs b/ipc-queue/src/fifo.rs index 6b9cdda8..1654824d 100644 --- a/ipc-queue/src/fifo.rs +++ b/ipc-queue/src/fifo.rs @@ -5,14 +5,48 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::cell::UnsafeCell; +use std::marker::PhantomData; use std::mem; -use std::sync::atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering}; -use std::sync::Arc; +#[cfg(not(target_env = "sgx"))] +use { + std::sync::atomic::AtomicU64, + std::sync::Arc, +}; +use std::sync::atomic::{AtomicUsize, Ordering, Ordering::SeqCst}; use fortanix_sgx_abi::{FifoDescriptor, WithId}; use super::*; +// `fortanix_sgx_abi::WithId` is not `Copy` because it contains an `AtomicU64`. +// This type has the same memory layout but is `Copy` and can be marked as +// `UserSafeSized` which is needed for the `User::from_raw_parts()` below. 
+#[cfg(target_env = "sgx")] +#[repr(C)] +#[derive(Default, Clone, Copy)] +struct UserSafeWithId { + pub id: u64, + pub data: T, +} + +#[cfg(target_env = "sgx")] +unsafe impl UserSafeSized for UserSafeWithId {} + +#[cfg(target_env = "sgx")] +unsafe fn _sanity_check_with_id() { + use std::mem::size_of; + let _: [u8; size_of::>()] = [0u8; size_of::>()]; +} + +#[cfg(target_env = "sgx")] +#[repr(transparent)] +#[derive(Copy, Clone)] +struct WrapUsize(usize); + +#[cfg(target_env = "sgx")] +unsafe impl UserSafeSized for WrapUsize{} + +#[cfg(not(target_env = "sgx"))] pub fn bounded(len: usize, s: S) -> (Sender, Receiver) where T: Transmittable, @@ -25,6 +59,7 @@ where (tx, rx) } +#[cfg(not(target_env = "sgx"))] pub fn bounded_async(len: usize, s: S) -> (AsyncSender, AsyncReceiver) where T: Transmittable, @@ -37,11 +72,43 @@ where (tx, rx) } +#[cfg(all(test, target_env = "sgx"))] +pub(crate) fn bounded(len: usize, s: S) -> (Sender, Receiver) +where + T: Transmittable, + S: Synchronizer, +{ + use std::ops::DerefMut; + use std::os::fortanix_sgx::usercalls::alloc::User; + + // Allocate [WithId; len] in userspace + // WARNING: This creates dangling memory in userspace, use in tests only! + let mut data = User::<[UserSafeWithId]>::uninitialized(len); + data.deref_mut().iter_mut().for_each(|v| v.copy_from_enclave(&UserSafeWithId::default())); + + // WARNING: This creates dangling memory in userspace, use in tests only! + let offsets = User::::new_from_enclave(&WrapUsize(0)); + let offsets = offsets.into_raw() as *const AtomicUsize; + + let descriptor = FifoDescriptor { + data: data.into_raw() as _, + len, + offsets, + }; + + let inner = unsafe { Fifo::from_descriptor(descriptor) }; + let tx = Sender { inner: inner.clone(), synchronizer: s.clone() }; + let rx = Receiver { inner, synchronizer: s }; + (tx, rx) +} + +#[cfg(not(target_env = "sgx"))] pub(crate) struct FifoBuffer { data: Box<[WithId]>, offsets: Box, } +#[cfg(not(target_env = "sgx"))] impl FifoBuffer { fn new(len: usize) -> Self { assert!( @@ -57,16 +124,18 @@ impl FifoBuffer { } } -enum Storage { +enum Storage { + #[cfg(not(target_env = "sgx"))] Shared(Arc>), - Static, + Static(PhantomData<&'static T>), } impl Clone for Storage { fn clone(&self) -> Self { match self { + #[cfg(not(target_env = "sgx"))] Storage::Shared(arc) => Storage::Shared(arc.clone()), - Storage::Static => Storage::Static, + Storage::Static(p) => Storage::Static(*p), } } } @@ -100,42 +169,23 @@ impl Fifo { "Fifo len should be a power of two" ); #[cfg(target_env = "sgx")] { - use std::os::fortanix_sgx::usercalls::alloc::{User, UserRef}; - - // `fortanix_sgx_abi::WithId` is not `Copy` because it contains an `AtomicU64`. - // This type has the same memory layout but is `Copy` and can be marked as - // `UserSafeSized` which is needed for the `User::from_raw_parts()` below. - #[repr(C)] - #[derive(Clone, Copy)] - pub struct WithId { - pub id: u64, - pub data: T, - } - unsafe impl UserSafeSized for WithId {} - - unsafe fn _sanity_check_with_id() { - use std::mem::size_of; - let _: [u8; size_of::>()] = [0u8; size_of::>()]; - } - - #[repr(transparent)] - #[derive(Copy, Clone)] - struct WrapUsize(usize); - unsafe impl UserSafeSized for WrapUsize{} + use std::os::fortanix_sgx::usercalls::alloc::User; // check pointers are outside enclave range, etc. 
- let data = User::<[WithId]>::from_raw_parts(descriptor.data as _, descriptor.len); + let data = User::<[UserSafeWithId]>::from_raw_parts(descriptor.data as _, descriptor.len); mem::forget(data); UserRef::from_ptr(descriptor.offsets as *const WrapUsize); + } let data_slice = std::slice::from_raw_parts(descriptor.data, descriptor.len); Self { data: &*(data_slice as *const [WithId] as *const [UnsafeCell>]), offsets: &*descriptor.offsets, - storage: Storage::Static, + storage: Storage::Static(PhantomData::default()), } } + #[cfg(not(target_env = "sgx"))] fn from_arc(fifo: Arc>) -> Self { unsafe { Self { @@ -148,10 +198,11 @@ impl Fifo { /// Consumes `self` and returns a DescriptorGuard. /// Panics if `self` was created using `from_descriptor`. + #[cfg(not(target_env = "sgx"))] pub(crate) fn into_descriptor_guard(self) -> DescriptorGuard { let arc = match self.storage { Storage::Shared(arc) => arc, - Storage::Static => panic!("Sender/Receiver created using `from_descriptor()` cannot be turned into DescriptorGuard."), + Storage::Static(_) => panic!("Sender/Receiver created using `from_descriptor()` cannot be turned into DescriptorGuard."), }; let descriptor = FifoDescriptor { data: self.data.as_ptr() as _, @@ -183,9 +234,11 @@ impl Fifo { }; // 4. Write the data, then the `id`. - let slot = unsafe { &mut *self.data[new.write_offset()].get() }; - slot.data = val.data; - slot.id.store(val.id, Ordering::SeqCst); + unsafe { + let slot = &mut *self.data[new.write_offset()].get(); + T::write(&mut slot.data, &val.data); + slot.id.store(val.id, SeqCst); + } // 5. If the queue was empty in step 1, signal the reader to wake up. Ok(was_empty) @@ -216,8 +269,9 @@ impl Fifo { }; // 6. Read the data, then store `0` in the `id`. - let val = Identified { id, data: slot.data }; - slot.id.store(0, Ordering::SeqCst); + let data = unsafe { T::read(&slot.data) }; + let val = Identified { id, data }; + slot.id.store(0, SeqCst); // 7. Store the new read offset, retrieving the old offsets. let before = fetch_adjust( diff --git a/ipc-queue/src/interface_async.rs b/ipc-queue/src/interface_async.rs index 5571a763..68fd63c3 100644 --- a/ipc-queue/src/interface_async.rs +++ b/ipc-queue/src/interface_async.rs @@ -42,6 +42,7 @@ impl AsyncSender { /// Consumes `self` and returns a DescriptorGuard. /// The returned guard can be used to make `FifoDescriptor`s that remain /// valid as long as the guard is not dropped. + #[cfg(not(target_env = "sgx"))] pub fn into_descriptor_guard(self) -> DescriptorGuard { self.inner.into_descriptor_guard() } @@ -82,6 +83,7 @@ impl AsyncReceiver { /// Consumes `self` and returns a DescriptorGuard. /// The returned guard can be used to make `FifoDescriptor`s that remain /// valid as long as the guard is not dropped. 
+ #[cfg(not(target_env = "sgx"))] pub fn into_descriptor_guard(self) -> DescriptorGuard { self.inner.into_descriptor_guard() } diff --git a/ipc-queue/src/interface_sync.rs b/ipc-queue/src/interface_sync.rs index dfed16d4..1e07cafa 100644 --- a/ipc-queue/src/interface_sync.rs +++ b/ipc-queue/src/interface_sync.rs @@ -156,6 +156,7 @@ impl<'r, T: Transmittable, S: Synchronizer> Iterator for TryIter<'r, T, S> { #[cfg(test)] mod tests { + use crate::fifo::bounded; use crate::test_support::pubsub::{Channel, Subscription}; use crate::test_support::TestValue; use crate::*; diff --git a/ipc-queue/src/lib.rs b/ipc-queue/src/lib.rs index 85b2a36b..9ced23e0 100644 --- a/ipc-queue/src/lib.rs +++ b/ipc-queue/src/lib.rs @@ -7,15 +7,22 @@ #![cfg_attr(target_env = "sgx", feature(sgx_platform))] use std::future::Future; -#[cfg(target_env = "sgx")] -use std::os::fortanix_sgx::usercalls::alloc::UserSafeSized; use std::pin::Pin; use std::sync::atomic::AtomicU32; use std::sync::Arc; use fortanix_sgx_abi::FifoDescriptor; -use self::fifo::{Fifo, FifoBuffer}; +use self::fifo::Fifo; + +#[cfg(target_env = "sgx")] +use std::os::fortanix_sgx::usercalls::alloc::{UserRef, UserSafeSized}; + +#[cfg(not(target_env = "sgx"))] +use { + std::ptr, + self::fifo::FifoBuffer, +}; mod fifo; mod interface_sync; @@ -25,17 +32,36 @@ mod position; mod test_support; #[cfg(target_env = "sgx")] -pub trait Transmittable: UserSafeSized + Default {} +pub trait Transmittable: UserSafeSized + Default { + unsafe fn write(ptr: *mut Self, val: &Self) { + UserRef::::from_mut_ptr(ptr).copy_from_enclave(val) + } + + unsafe fn read(ptr: *const Self) -> Self { + let mut data = Default::default(); + UserRef::::from_ptr(ptr).copy_to_enclave(&mut data); + data + } +} #[cfg(target_env = "sgx")] impl Transmittable for T where T: UserSafeSized + Default {} #[cfg(not(target_env = "sgx"))] -pub trait Transmittable: Copy + Sized + Default {} +pub trait Transmittable: Copy + Sized + Default { + unsafe fn write(ptr: *mut Self, val: &Self) { + ptr::write(ptr, *val); + } + + unsafe fn read(ptr: *const Self) -> Self { + ptr::read(ptr) + } +} #[cfg(not(target_env = "sgx"))] impl Transmittable for T where T: Copy + Sized + Default {} +#[cfg(not(target_env = "sgx"))] pub fn bounded(len: usize, s: S) -> (Sender, Receiver) where T: Transmittable, @@ -44,6 +70,7 @@ where self::fifo::bounded(len, s) } +#[cfg(not(target_env = "sgx"))] pub fn bounded_async(len: usize, s: S) -> (AsyncSender, AsyncReceiver) where T: Transmittable, @@ -132,6 +159,7 @@ pub struct AsyncReceiver { /// to remain valid as long as the DescriptorGuard is not dropped. 
pub struct DescriptorGuard { descriptor: FifoDescriptor, + #[cfg(not(target_env = "sgx"))] _fifo: Arc>, } From 8117cb389e55e0ccf20c8d3bb23cd2337e11c495 Mon Sep 17 00:00:00 2001 From: Raoul Strackx Date: Wed, 1 Jun 2022 13:56:59 +0200 Subject: [PATCH 19/22] Making `read_epoch` `AtomicU64` --- ipc-queue/src/fifo.rs | 2 +- ipc-queue/src/lib.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ipc-queue/src/fifo.rs b/ipc-queue/src/fifo.rs index 1654824d..8c2f4579 100644 --- a/ipc-queue/src/fifo.rs +++ b/ipc-queue/src/fifo.rs @@ -68,7 +68,7 @@ where let arc = Arc::new(FifoBuffer::new(len)); let inner = Fifo::from_arc(arc); let tx = AsyncSender { inner: inner.clone(), synchronizer: s.clone() }; - let rx = AsyncReceiver { inner, synchronizer: s, read_epoch: Arc::new(AtomicU32::new(0)) }; + let rx = AsyncReceiver { inner, synchronizer: s, read_epoch: Arc::new(AtomicU64::new(0)) }; (tx, rx) } diff --git a/ipc-queue/src/lib.rs b/ipc-queue/src/lib.rs index 9ced23e0..b67fe0ac 100644 --- a/ipc-queue/src/lib.rs +++ b/ipc-queue/src/lib.rs @@ -8,7 +8,7 @@ use std::future::Future; use std::pin::Pin; -use std::sync::atomic::AtomicU32; +use std::sync::atomic::AtomicU64; use std::sync::Arc; use fortanix_sgx_abi::FifoDescriptor; @@ -152,7 +152,7 @@ pub struct AsyncSender { pub struct AsyncReceiver { inner: Fifo, synchronizer: S, - read_epoch: Arc, + read_epoch: Arc, } /// `DescriptorGuard` can produce a `FifoDescriptor` that is guaranteed @@ -175,7 +175,7 @@ impl DescriptorGuard { /// read to/from the queue. This is useful in case we want to know whether or /// not a particular value written to the queue has been read. pub struct PositionMonitor { - read_epoch: Arc, + read_epoch: Arc, fifo: Fifo, } From 1008a62572e340a78f78b194972690a022f7a311 Mon Sep 17 00:00:00 2001 From: Raoul Strackx Date: Tue, 12 Apr 2022 12:19:21 +0200 Subject: [PATCH 20/22] Rust nightly compatibility --- .travis.yml | 2 +- Cargo.lock | 20 +++++++------- aesm-client/src/imp/aesm_protobuf/mod.rs | 2 +- em-app/Cargo.lock | 13 +++++++-- em-app/Cargo.toml | 2 +- enclave-runner/src/lib.rs | 2 +- enclave-runner/src/loader.rs | 1 + enclave-runner/src/tcs.rs | 3 ++- enclave-runner/src/usercalls/mod.rs | 2 +- ipc-queue/src/fifo.rs | 3 +-- sgx-isa/Cargo.toml | 1 - sgx-isa/src/arch.rs | 34 ++++++++++++++---------- sgx-isa/src/lib.rs | 18 +++++-------- sgx-pkix/Cargo.toml | 2 +- sgxs-tools/src/bin/sgxs-load.rs | 33 ++++++++++++----------- 15 files changed, 77 insertions(+), 61 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2527df3a..db315677 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ matrix: - libclang-3.8-dev - musl-tools rust: - - nightly-2021-01-14 + - nightly env: - RUST_BACKTRACE=1 LLVM_CONFIG_PATH=llvm-3.8-config before_script: diff --git a/Cargo.lock b/Cargo.lock index 1580755e..8e9faee6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+version = 3 + [[package]] name = "addr2line" version = "0.13.0" @@ -1718,34 +1720,34 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.18.0" +version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d147edb77bcccbfc81fabffdc7bd50c13e103b15ca1e27515fe40de69a5776b" +checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" [[package]] name = "protobuf-codegen" -version = "2.18.0" +version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e81f70c25aab9506f87253c55f7cdcd8917635d5597382958d20025c211bbbd" +checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707" dependencies = [ "protobuf", ] [[package]] name = "protoc" -version = "2.18.0" +version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57408af2c106a7f08cc61e15be6a31e3ace8ea26f90dd1be1ad19abf1073d36a" +checksum = "c2ef1dc036942fac2470fdb8a911f125404ee9129e9e807f3d12d8589001a38f" dependencies = [ - "log 0.4.11", + "log 0.3.9", "which", ] [[package]] name = "protoc-rust" -version = "2.18.0" +version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21c1582ff3efeccef1385b1c2dfaf4a9b5bc7794865624fc2a3ca9dff1145fa1" +checksum = "1a9e315121c8e7e21396e940a3d27f92280a6d28e3931213bf6cbfea76c5cc94" dependencies = [ "protobuf", "protobuf-codegen", diff --git a/aesm-client/src/imp/aesm_protobuf/mod.rs b/aesm-client/src/imp/aesm_protobuf/mod.rs index f835b6f2..bc13232d 100644 --- a/aesm-client/src/imp/aesm_protobuf/mod.rs +++ b/aesm-client/src/imp/aesm_protobuf/mod.rs @@ -50,7 +50,7 @@ impl AesmClient { let mut res_bytes = vec![0; res_len as usize]; sock.read_exact(&mut res_bytes)?; - let res = T::Response::from_response(protobuf::parse_from_bytes(&res_bytes))?; + let res = T::Response::from_response(Message::parse_from_bytes(&res_bytes))?; Ok(res) } diff --git a/em-app/Cargo.lock b/em-app/Cargo.lock index 93e0bdf6..23116d06 100644 --- a/em-app/Cargo.lock +++ b/em-app/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "aho-corasick" version = "0.5.3" @@ -168,7 +170,7 @@ dependencies = [ "pkix", "serde_bytes", "serde_json", - "sgx-isa", + "sgx-isa 0.3.3", "sgx_pkix", ] @@ -618,6 +620,13 @@ dependencies = [ "bitflags 1.2.1", ] +[[package]] +name = "sgx-isa" +version = "0.3.3" +dependencies = [ + "bitflags 1.2.1", +] + [[package]] name = "sgx_pkix" version = "0.1.0" @@ -628,7 +637,7 @@ dependencies = [ "lazy_static 1.4.0", "pkix", "quick-error", - "sgx-isa", + "sgx-isa 0.3.2", ] [[package]] diff --git a/em-app/Cargo.toml b/em-app/Cargo.toml index 3562ef71..9a4d8db2 100644 --- a/em-app/Cargo.toml +++ b/em-app/Cargo.toml @@ -17,7 +17,7 @@ mbedtls = { version = "0.7", default-features = false, features = ["sgx"] } b64-ct = "0.1.0" serde_bytes = "0.10" serde_json = "1.0" -sgx-isa = { version="0.3", features=["sgxstd"], default-features=false } +sgx-isa = { version = "0.3", path = "../sgx-isa", features = ["sgxstd"], default-features = false } em-node-agent-client = "1.0.0" sgx_pkix = "0.1.0" diff --git a/enclave-runner/src/lib.rs b/enclave-runner/src/lib.rs index d8f0657e..175d2757 100644 --- a/enclave-runner/src/lib.rs +++ b/enclave-runner/src/lib.rs @@ -4,7 +4,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -#![feature(asm)] +#![deny(warnings)] #![doc( html_logo_url = "https://edp.fortanix.com/img/docs/edp-logo.svg", html_favicon_url = "https://edp.fortanix.com/favicon.ico", diff --git a/enclave-runner/src/loader.rs b/enclave-runner/src/loader.rs index 301802e6..03c7194c 100644 --- a/enclave-runner/src/loader.rs +++ b/enclave-runner/src/loader.rs @@ -102,6 +102,7 @@ impl From for EnclavePanic { #[derive(Debug)] pub(crate) struct ErasedTcs { address: *mut c_void, + #[allow(dead_code)] tcs: Box, } diff --git a/enclave-runner/src/tcs.rs b/enclave-runner/src/tcs.rs index db6e8e1e..1c2767a8 100644 --- a/enclave-runner/src/tcs.rs +++ b/enclave-runner/src/tcs.rs @@ -5,6 +5,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std; +use std::arch::asm; use std::cell::RefCell; use std::convert::{TryFrom, TryInto}; use std::fmt; @@ -113,7 +114,7 @@ pub(crate) fn coenter( user_handler: u64, user_data: u64, reserved: [u64; 27], - }; + } impl fmt::Debug for SgxEnclaveRun { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/enclave-runner/src/usercalls/mod.rs b/enclave-runner/src/usercalls/mod.rs index b0870a1b..3ffcb787 100644 --- a/enclave-runner/src/usercalls/mod.rs +++ b/enclave-runner/src/usercalls/mod.rs @@ -1318,7 +1318,7 @@ async fn trap_attached_debugger(tcs: usize, debug_buf: *const u8) { // Synchronized unsafe { let old = signal::sigaction(signal::SIGTRAP, &sig_action).unwrap(); - asm!(" + std::arch::asm!(" xchg %rbx, {0} int3 xchg {0}, %rbx diff --git a/ipc-queue/src/fifo.rs b/ipc-queue/src/fifo.rs index 8c2f4579..df9ef0d1 100644 --- a/ipc-queue/src/fifo.rs +++ b/ipc-queue/src/fifo.rs @@ -227,8 +227,7 @@ impl Fifo { // with the current offsets. If the CAS was not succesful, go to step 1. let new = current.increment_write_offset(); let current = current.as_usize(); - let prev = self.offsets.compare_and_swap(current, new.as_usize(), Ordering::SeqCst); - if prev == current { + if let Ok(_) = self.offsets.compare_exchange(current, new.as_usize(), Ordering::SeqCst, Ordering::SeqCst) { break (new, was_empty); } }; diff --git a/sgx-isa/Cargo.toml b/sgx-isa/Cargo.toml index ea9a4ca4..cf87a898 100644 --- a/sgx-isa/Cargo.toml +++ b/sgx-isa/Cargo.toml @@ -27,4 +27,3 @@ serde = { version = "1.0.104", features = ["derive"], optional = true } # MIT/Ap [features] large_array_derive = [] sgxstd = [] -nightly = [] diff --git a/sgx-isa/src/arch.rs b/sgx-isa/src/arch.rs index 68d03773..41f3a124 100644 --- a/sgx-isa/src/arch.rs +++ b/sgx-isa/src/arch.rs @@ -4,6 +4,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use core::mem::MaybeUninit; +use core::arch::asm; use super::Enclu; /// Wrapper struct to force 16-byte alignment. 
@@ -24,13 +25,15 @@ pub fn egetkey(request: &Align512<[u8; 512]>) -> Result, u32> let mut out = MaybeUninit::uninit(); let error; - llvm_asm!( - "enclu" - : "={eax}"(error) - : "{eax}"(Enclu::EGetkey), - "{rbx}"(request), - "{rcx}"(out.as_mut_ptr()) - : "flags" + asm!( + // rbx is reserved by LLVM + "xchg %rbx, {0}", + "enclu", + "mov {0}, %rbx", + inout(reg) request => _, + inlateout("eax") Enclu::EGetkey as u32 => error, + in("rcx") out.as_mut_ptr(), + options(att_syntax, nostack), ); match error { @@ -52,13 +55,16 @@ pub fn ereport( unsafe { let mut report = MaybeUninit::uninit(); - llvm_asm!( - "enclu" - : /* no output registers */ - : "{eax}"(Enclu::EReport), - "{rbx}"(targetinfo), - "{rcx}"(reportdata), - "{rdx}"(report.as_mut_ptr()) + asm!( + // rbx is reserved by LLVM + "xchg %rbx, {0}", + "enclu", + "mov {0}, %rbx", + inout(reg) targetinfo => _, + in("eax") Enclu::EReport as u32, + in("rcx") reportdata, + in("rdx") report.as_mut_ptr(), + options(att_syntax, preserves_flags, nostack), ); report.assume_init() diff --git a/sgx-isa/src/lib.rs b/sgx-isa/src/lib.rs index 01caf5b9..9ab81227 100644 --- a/sgx-isa/src/lib.rs +++ b/sgx-isa/src/lib.rs @@ -10,8 +10,6 @@ //! convenient. //! //! [isdm]: https://www-ssl.intel.com/content/www/us/en/processors/architectures-software-developer-manuals.html -#![cfg_attr(feature = "nightly", feature(llvm_asm))] - #![no_std] #![doc(html_logo_url = "https://edp.fortanix.com/img/docs/edp-logo.svg", html_favicon_url = "https://edp.fortanix.com/favicon.ico", @@ -30,9 +28,9 @@ extern crate serde; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; -#[cfg(all(feature = "sgxstd", target_env = "sgx"))] +#[cfg(all(target_env = "sgx", feature = "sgxstd"))] use std::os::fortanix_sgx::arch; -#[cfg(all(feature = "nightly", target_env = "sgx", not(feature = "sgxstd")))] +#[cfg(all(target_env = "sgx", not(feature = "sgxstd")))] mod arch; use core::{convert::TryFrom, num::TryFromIntError, slice}; @@ -225,9 +223,7 @@ macro_rules! struct_def { }; (@align bytes $($other:tt)*) => {}; (@align type $ty:ident name $name:ident) => { - #[cfg(all(feature = "sgxstd", target_env = "sgx"))] - /// **Note.** This implementation is only available on the SGX target - /// with the `sgxstd` feature. + #[cfg(target_env = "sgx")] impl AsRef> for $name { fn as_ref(&self) -> &arch::$ty<[u8; $name::UNPADDED_SIZE]> { unsafe { @@ -678,7 +674,7 @@ impl Report { /// /// let targetinfo_self = Targetinfo::from(Report::for_self()); /// ``` - #[cfg(all(feature = "sgxstd", target_env = "sgx"))] + #[cfg(target_env = "sgx")] pub fn for_self() -> Self { let reportdata = arch::Align128([0; 64]); let targetinfo = arch::Align512([0; 512]); @@ -687,7 +683,7 @@ impl Report { Report::try_copy_from(&out.0).unwrap() } - #[cfg(all(feature = "sgxstd", target_env = "sgx"))] + #[cfg(target_env = "sgx")] pub fn for_target(targetinfo: &Targetinfo, reportdata: &[u8; 64]) -> Report { let reportdata = arch::Align128(*reportdata); let out = arch::ereport(targetinfo.as_ref(), &reportdata); @@ -700,7 +696,7 @@ impl Report { /// /// Care should be taken that `check_mac` prevents timing attacks, /// in particular that the comparison happens in constant time. 
-    #[cfg(all(feature = "sgxstd", target_env = "sgx"))]
+    #[cfg(target_env = "sgx")]
     pub fn verify<F, R>(&self, check_mac: F) -> R
     where
         F: FnOnce(&[u8; 16], &[u8; Report::TRUNCATED_SIZE], &[u8; 16]) -> R,
@@ -781,7 +777,7 @@ pub struct Keyrequest {
 impl Keyrequest {
     pub const UNPADDED_SIZE: usize = 512;
 
-    #[cfg(all(feature = "sgxstd", target_env = "sgx"))]
+    #[cfg(target_env = "sgx")]
     pub fn egetkey(&self) -> Result<[u8; 16], ErrorCode> {
         match arch::egetkey(self.as_ref()) {
             Ok(k) => Ok(k.0),
diff --git a/sgx-pkix/Cargo.toml b/sgx-pkix/Cargo.toml
index 7dc7283b..753dcc48 100644
--- a/sgx-pkix/Cargo.toml
+++ b/sgx-pkix/Cargo.toml
@@ -14,6 +14,6 @@ categories = ["cryptography"]
 [dependencies]
 byteorder = "1.0"
 pkix = "0.1.1"
-sgx-isa = "0.3"
+sgx-isa = { version = "0.3", path = "../sgx-isa" }
 quick-error = "1.1.0"
 lazy_static = "1"
diff --git a/sgxs-tools/src/bin/sgxs-load.rs b/sgxs-tools/src/bin/sgxs-load.rs
index 41dde086..2da85d01 100644
--- a/sgxs-tools/src/bin/sgxs-load.rs
+++ b/sgxs-tools/src/bin/sgxs-load.rs
@@ -4,7 +4,6 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-#![feature(llvm_asm)]
 extern crate aesm_client;
 extern crate clap;
 extern crate sgx_isa;
@@ -29,20 +28,24 @@ use sgxs_loaders::enclaveapi::Sgx as SgxDevice;
 fn enclu_eenter(tcs: &mut dyn Tcs) {
     let result: u32;
     unsafe {
-        llvm_asm!("
-        lea aep(%rip),%rcx
-        jmp enclu
-aep:
-        xor %eax,%eax
-        jmp post
-enclu:
-        enclu
-post:
-"       : "={eax}"(result)
-        : "{eax}"(Enclu::EEnter), "{rbx}"(tcs.address())
-        : "rcx"
-        : "volatile"
-        )
+        std::arch::asm!("
+        xchg %rbx, {0}
+        lea 1f(%rip),%rcx
+        jmp 2f
+1:
+        xor %eax,%eax
+        jmp 3f
+2:
+        enclu
+3:
+        xchg {0}, %rbx
+",
+        // rbx is used internally by LLVM and cannot be used as an operand for inline asm (#84658)
+        in(reg) tcs.address(),
+        inout("eax") Enclu::EEnter as u32 => result,
+        lateout("rcx") _,
+        options(nostack, att_syntax)
+        );
     };
 
     if result == 0 {

From 367d2864031f0f84f0498909c9b2598a503ae0c2 Mon Sep 17 00:00:00 2001
From: Raoul Strackx
Date: Fri, 3 Feb 2023 09:50:04 +0100
Subject: [PATCH 21/22] Enable `enclave_loader()` fallback to libsgx_enclave_common.so.1

---
 dcap-ql/src/bindings/mod.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/dcap-ql/src/bindings/mod.rs b/dcap-ql/src/bindings/mod.rs
index c16e2c6e..ab4bb9ec 100644
--- a/dcap-ql/src/bindings/mod.rs
+++ b/dcap-ql/src/bindings/mod.rs
@@ -81,5 +81,7 @@ pub fn enclave_loader() -> Result {
     // so we should be able to find it already loaded.
     // We can't use the library from `mod dl` if `not(feature = "link")`,
     // because that is not the right library.
-    Ok(EnclaveCommonLibrary::load(Some(Dl::this().into()))?.build())
+    let lib = EnclaveCommonLibrary::load(Some(Dl::this().into()))
+        .or(EnclaveCommonLibrary::load(None))?;
+    Ok(lib.build())
 }

From c10e404e81279f543a01b571929377b3913778ca Mon Sep 17 00:00:00 2001
From: Yuxiang Cao
Date: Mon, 6 Feb 2023 12:00:08 -0800
Subject: [PATCH 22/22] ci: pin rust version to nightly-2023-01-31 to avoid
 error mentioned in issue #433

---
 .travis.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index db315677..60fbbe84 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -23,7 +23,8 @@ matrix:
       - libclang-3.8-dev
       - musl-tools
     rust:
-      - nightly
+      # This needs to change back to `nightly` after https://github.com/fortanix/rust-sgx/issues/433 is fixed
+      - nightly-2023-01-31
     env:
       - RUST_BACKTRACE=1 LLVM_CONFIG_PATH=llvm-3.8-config
     before_script:
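A note on the inline asm changes in patches 13 and 20: recent LLVM reserves
`rbx` for internal use, so it can no longer be named as an `asm!` operand
(rust-lang/rust#84658). Every patched site therefore passes the value in an
arbitrary register and swaps it into `rbx` around the instruction that
consumes it. A minimal self-contained sketch of the pattern, for reference
(illustrative only, not taken from any patch; the function name is made up):

    use std::arch::asm;

    /// Round-trips `v` through rbx: the first xchg parks LLVM's copy of rbx
    /// in a scratch register and the second restores it, as in the
    /// enclu/int3/egetkey sites patched above.
    unsafe fn roundtrip_through_rbx(v: u64) -> u64 {
        let out;
        asm!(
            "xchg %rbx, {0}", // move v into rbx, save LLVM's rbx in {0}
            "mov %rbx, {1}",  // stand-in for the instruction that needs rbx
            "xchg %rbx, {0}", // restore LLVM's rbx
            inout(reg) v => _,
            out(reg) out,
            options(att_syntax, nostack),
        );
        out
    }

Calling `roundtrip_through_rbx(42)` simply returns 42; the point is only that
`rbx` holds the caller's value for the duration of the middle instruction
while LLVM's contents are preserved, and that `{0}`/`{1}` can never be
allocated to `rbx` itself since the `reg` class excludes it.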