diff --git a/Cargo.lock b/Cargo.lock index a93078829e29b..49a925d6e3780 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -644,9 +644,9 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335" [[package]] name = "compiler_builtins" -version = "0.1.100" +version = "0.1.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c0f24437059853f0fa64afc51f338f93647a3de4cf3358ba1bb4171a199775" +checksum = "01a6d58e9c3408138099a396a98fd0d0e6cfb25d723594d2ae48b5004513fd5b" dependencies = [ "cc", "rustc-std-workspace-core", diff --git a/library/panic_abort/src/lib.rs b/library/panic_abort/src/lib.rs index 02534491da976..0dc570b29db11 100644 --- a/library/panic_abort/src/lib.rs +++ b/library/panic_abort/src/lib.rs @@ -43,7 +43,8 @@ pub unsafe fn __rust_start_panic(_payload: &mut dyn BoxMeUp) -> u32 { libc::abort(); } } else if #[cfg(any(target_os = "hermit", - all(target_vendor = "fortanix", target_env = "sgx") + all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "xous" ))] { unsafe fn abort() -> ! { // call std::sys::abort_internal diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 33c9c6e63c157..e2b97f983d22b 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -36,8 +36,8 @@ object = { version = "0.32.0", default-features = false, optional = true, featur rand = { version = "0.8.5", default-features = false, features = ["alloc"] } rand_xorshift = "0.3.0" -[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies] -dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] } +[target.'cfg(any(all(target_family = "wasm", target_os = "unknown"), target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx")))'.dependencies] +dlmalloc = { version = "0.2.4", features = ['rustc-dep-of-std'] } [target.x86_64-fortanix-unknown-sgx.dependencies] fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'], public = true } diff --git a/library/std/build.rs b/library/std/build.rs index ddf6e84d8d035..a81c45609ea52 100644 --- a/library/std/build.rs +++ b/library/std/build.rs @@ -37,6 +37,7 @@ fn main() { || target.contains("nintendo-3ds") || target.contains("vita") || target.contains("nto") + || target.contains("xous") // See src/bootstrap/synthetic_targets.rs || env::var("RUSTC_BOOTSTRAP_SYNTHETIC_TARGET").is_ok() { diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index adc1b85fd85fb..73cce35ac5912 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -8,7 +8,7 @@ #![stable(feature = "rust1", since = "1.0.0")] #![deny(unsafe_op_in_unsafe_fn)] -#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))] +#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))] mod tests; use crate::ffi::OsString; diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index 55c112c7b806c..6ec158af94bf4 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -260,6 +260,7 @@ feature(slice_index_methods, coerce_unsized, sgx_platform) )] #![cfg_attr(windows, feature(round_char_boundary))] +#![cfg_attr(target_os = "xous", feature(slice_ptr_len))] // // Language features: // tidy-alphabetical-start diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs index 32fd54c8e7510..9667d5f920e43 100644 --- a/library/std/src/net/tcp.rs +++ b/library/std/src/net/tcp.rs @@ -1,6 +1,6 @@ #![deny(unsafe_op_in_unsafe_fn)] -#[cfg(all(test, not(target_os 
= "emscripten")))] +#[cfg(all(test, not(any(target_os = "emscripten", target_os = "xous"))))] mod tests; use crate::io::prelude::*; diff --git a/library/std/src/net/udp.rs b/library/std/src/net/udp.rs index 5ca4ed832f340..227e418b70992 100644 --- a/library/std/src/net/udp.rs +++ b/library/std/src/net/udp.rs @@ -1,4 +1,4 @@ -#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))] +#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))] mod tests; use crate::fmt; diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs index 634c3cc4a15c4..de6d784c65b1d 100644 --- a/library/std/src/os/mod.rs +++ b/library/std/src/os/mod.rs @@ -146,6 +146,8 @@ pub mod vita; pub mod vxworks; #[cfg(target_os = "watchos")] pub(crate) mod watchos; +#[cfg(target_os = "xous")] +pub mod xous; #[cfg(any(unix, target_os = "wasi", doc))] pub mod fd; diff --git a/library/std/src/os/xous/ffi.rs b/library/std/src/os/xous/ffi.rs new file mode 100644 index 0000000000000..8be7fbb102f3e --- /dev/null +++ b/library/std/src/os/xous/ffi.rs @@ -0,0 +1,647 @@ +#![allow(dead_code)] +#![allow(unused_variables)] +#![stable(feature = "rust1", since = "1.0.0")] + +#[path = "../unix/ffi/os_str.rs"] +mod os_str; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::os_str::{OsStrExt, OsStringExt}; + +mod definitions; +#[stable(feature = "rust1", since = "1.0.0")] +pub use definitions::*; + +fn lend_mut_impl( + connection: Connection, + opcode: usize, + data: &mut [u8], + arg1: usize, + arg2: usize, + blocking: bool, +) -> Result<(usize, usize), Error> { + let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize; + let mut a1: usize = connection.try_into().unwrap(); + let mut a2 = InvokeType::LendMut as usize; + let a3 = opcode; + let a4 = data.as_mut_ptr() as usize; + let a5 = data.len(); + let a6 = arg1; + let a7 = arg2; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::MemoryReturned as usize { + Ok((a1, a2)) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +pub(crate) fn lend_mut( + connection: Connection, + opcode: usize, + data: &mut [u8], + arg1: usize, + arg2: usize, +) -> Result<(usize, usize), Error> { + lend_mut_impl(connection, opcode, data, arg1, arg2, true) +} + +pub(crate) fn try_lend_mut( + connection: Connection, + opcode: usize, + data: &mut [u8], + arg1: usize, + arg2: usize, +) -> Result<(usize, usize), Error> { + lend_mut_impl(connection, opcode, data, arg1, arg2, false) +} + +fn lend_impl( + connection: Connection, + opcode: usize, + data: &[u8], + arg1: usize, + arg2: usize, + blocking: bool, +) -> Result<(usize, usize), Error> { + let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize; + let a1: usize = connection.try_into().unwrap(); + let a2 = InvokeType::Lend as usize; + let a3 = opcode; + let a4 = data.as_ptr() as usize; + let a5 = data.len(); + let mut a6 = arg1; + let mut a7 = arg2; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1 => _, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6, + inlateout("a7") a7, + ) + }; + + let 
result = a0; + + if result == SyscallResult::MemoryReturned as usize { + Ok((a6, a7)) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +pub(crate) fn lend( + connection: Connection, + opcode: usize, + data: &[u8], + arg1: usize, + arg2: usize, +) -> Result<(usize, usize), Error> { + lend_impl(connection, opcode, data, arg1, arg2, true) +} + +pub(crate) fn try_lend( + connection: Connection, + opcode: usize, + data: &[u8], + arg1: usize, + arg2: usize, +) -> Result<(usize, usize), Error> { + lend_impl(connection, opcode, data, arg1, arg2, false) +} + +fn scalar_impl(connection: Connection, args: [usize; 5], blocking: bool) -> Result<(), Error> { + let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize; + let mut a1: usize = connection.try_into().unwrap(); + let a2 = InvokeType::Scalar as usize; + let a3 = args[0]; + let a4 = args[1]; + let a5 = args[2]; + let a6 = args[3]; + let a7 = args[4]; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::Ok as usize { + Ok(()) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +pub(crate) fn scalar(connection: Connection, args: [usize; 5]) -> Result<(), Error> { + scalar_impl(connection, args, true) +} + +pub(crate) fn try_scalar(connection: Connection, args: [usize; 5]) -> Result<(), Error> { + scalar_impl(connection, args, false) +} + +fn blocking_scalar_impl( + connection: Connection, + args: [usize; 5], + blocking: bool, +) -> Result<[usize; 5], Error> { + let mut a0 = if blocking { Syscall::SendMessage } else { Syscall::TrySendMessage } as usize; + let mut a1: usize = connection.try_into().unwrap(); + let mut a2 = InvokeType::BlockingScalar as usize; + let mut a3 = args[0]; + let mut a4 = args[1]; + let mut a5 = args[2]; + let a6 = args[3]; + let a7 = args[4]; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2, + inlateout("a3") a3, + inlateout("a4") a4, + inlateout("a5") a5, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::Scalar1 as usize { + Ok([a1, 0, 0, 0, 0]) + } else if result == SyscallResult::Scalar2 as usize { + Ok([a1, a2, 0, 0, 0]) + } else if result == SyscallResult::Scalar5 as usize { + Ok([a1, a2, a3, a4, a5]) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +pub(crate) fn blocking_scalar( + connection: Connection, + args: [usize; 5], +) -> Result<[usize; 5], Error> { + blocking_scalar_impl(connection, args, true) +} + +pub(crate) fn try_blocking_scalar( + connection: Connection, + args: [usize; 5], +) -> Result<[usize; 5], Error> { + blocking_scalar_impl(connection, args, false) +} + +fn connect_impl(address: ServerAddress, blocking: bool) -> Result<Connection, Error> { + let a0 = if blocking { Syscall::Connect } else { Syscall::TryConnect } as usize; + let address: [u32; 4] = address.into(); + let a1: usize = address[0].try_into().unwrap(); + let a2: usize = address[1].try_into().unwrap(); + let a3: usize = address[2].try_into().unwrap(); + let a4: usize = address[3].try_into().unwrap(); + let a5 = 0; + let a6 = 0; + let a7 = 0; +
+ let mut result: usize; + let mut value: usize; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0 => result, + inlateout("a1") a1 => value, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + if result == SyscallResult::ConnectionId as usize { + Ok(value.try_into().unwrap()) + } else if result == SyscallResult::Error as usize { + Err(value.into()) + } else { + Err(Error::InternalError) + } +} + +/// Connect to a Xous server represented by the specified `address`. +/// +/// The current thread will block until the server is available. Returns +/// an error if the server cannot accept any more connections. +pub(crate) fn connect(address: ServerAddress) -> Result<Connection, Error> { + connect_impl(address, true) +} + +/// Attempt to connect to a Xous server represented by the specified `address`. +/// +/// If the server does not exist then None is returned. +pub(crate) fn try_connect(address: ServerAddress) -> Result<Option<Connection>, Error> { + match connect_impl(address, false) { + Ok(conn) => Ok(Some(conn)), + Err(Error::ServerNotFound) => Ok(None), + Err(e) => Err(e), + } +} + +/// Terminate the current process and return the specified code to the parent process. +pub(crate) fn exit(return_code: u32) -> ! { + let a0 = Syscall::TerminateProcess as usize; + let a1 = return_code as usize; + let a2 = 0; + let a3 = 0; + let a4 = 0; + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + in("a0") a0, + in("a1") a1, + in("a2") a2, + in("a3") a3, + in("a4") a4, + in("a5") a5, + in("a6") a6, + in("a7") a7, + ) + }; + unreachable!(); +} + +/// Suspend the current thread and allow another thread to run. This thread may +/// continue executing again immediately if there are no other threads available +/// to run on the system. +pub(crate) fn do_yield() { + let a0 = Syscall::Yield as usize; + let a1 = 0; + let a2 = 0; + let a3 = 0; + let a4 = 0; + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0 => _, + inlateout("a1") a1 => _, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; +} + +/// Allocate memory from the system. An optional physical and/or virtual address +/// may be specified in order to ensure memory is allocated at specific offsets, +/// otherwise the kernel will select an address. +/// +/// # Safety +/// +/// This function is safe unless a virtual address is specified. In that case, +/// the kernel will return an alias to the existing range. This violates Rust's +/// pointer uniqueness guarantee.
+pub(crate) unsafe fn map_memory<T>( + phys: Option<core::ptr::NonNull<T>>, + virt: Option<core::ptr::NonNull<T>>, + count: usize, + flags: MemoryFlags, +) -> Result<&'static mut [T], Error> { + let mut a0 = Syscall::MapMemory as usize; + let mut a1 = phys.map(|p| p.as_ptr() as usize).unwrap_or_default(); + let mut a2 = virt.map(|p| p.as_ptr() as usize).unwrap_or_default(); + let a3 = count * core::mem::size_of::<T>(); + let a4 = flags.bits(); + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::MemoryRange as usize { + let start = core::ptr::from_exposed_addr_mut::<T>(a1); + let len = a2 / core::mem::size_of::<T>(); + let end = unsafe { start.add(len) }; + Ok(unsafe { core::slice::from_raw_parts_mut(start, len) }) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +/// Destroy the given memory, returning it to the kernel. +/// +/// Safety: The memory pointed to by `range` should not be used after this +/// function returns, even if this function returns Err(). +pub(crate) unsafe fn unmap_memory<T>(range: *mut [T]) -> Result<(), Error> { + let mut a0 = Syscall::UnmapMemory as usize; + let mut a1 = range.as_mut_ptr() as usize; + let a2 = range.len(); + let a3 = 0; + let a4 = 0; + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::Ok as usize { + Ok(()) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +/// Adjust the memory flags for the given range. This can be used to remove flags +/// from a given region in order to harden memory access. Note that flags may +/// only be removed and may never be added. +/// +/// Safety: The memory pointed to by `range` may become inaccessible or have its +/// mutability removed. It is up to the caller to ensure that the flags specified +/// by `new_flags` are upheld, otherwise the program will crash.
+pub(crate) unsafe fn update_memory_flags<T>( + range: *mut [T], + new_flags: MemoryFlags, +) -> Result<(), Error> { + let mut a0 = Syscall::UpdateMemoryFlags as usize; + let mut a1 = range.as_mut_ptr() as usize; + let a2 = range.len(); + let a3 = new_flags.bits(); + let a4 = 0; // Process ID is currently None + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::Ok as usize { + Ok(()) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +/// Create a thread with a given stack and up to four arguments +pub(crate) fn create_thread( + start: *mut usize, + stack: *mut [u8], + arg0: usize, + arg1: usize, + arg2: usize, + arg3: usize, +) -> Result<ThreadId, Error> { + let mut a0 = Syscall::CreateThread as usize; + let mut a1 = start as usize; + let a2 = stack.as_mut_ptr() as usize; + let a3 = stack.len(); + let a4 = arg0; + let a5 = arg1; + let a6 = arg2; + let a7 = arg3; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::ThreadId as usize { + Ok(a1.into()) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +/// Wait for the given thread to terminate and return the exit code from that thread. +pub(crate) fn join_thread(thread_id: ThreadId) -> Result<usize, Error> { + let mut a0 = Syscall::JoinThread as usize; + let mut a1 = thread_id.into(); + let a2 = 0; + let a3 = 0; + let a4 = 0; + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::Scalar1 as usize { + Ok(a1) + } else if result == SyscallResult::Scalar2 as usize { + Ok(a1) + } else if result == SyscallResult::Scalar5 as usize { + Ok(a1) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +/// Get the current thread's ID +pub(crate) fn thread_id() -> Result<ThreadId, Error> { + let mut a0 = Syscall::GetThreadId as usize; + let mut a1 = 0; + let a2 = 0; + let a3 = 0; + let a4 = 0; + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::ThreadId as usize { + Ok(a1.into()) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} + +/// Adjust the given `knob` limit to match the new value `new`. The current value must +/// match the `current` in order for this to take effect. +/// +/// The new value is returned as a result of this call. If the call fails, then the old +/// value is returned.
In either case, this function returns successfully. +/// +/// An error is generated if the `knob` is not a valid limit, or if the call +/// would not succeed. +pub(crate) fn adjust_limit(knob: Limits, current: usize, new: usize) -> Result<usize, Error> { + let mut a0 = Syscall::AdjustProcessLimit as usize; + let mut a1 = knob as usize; + let a2 = current; + let a3 = new; + let a4 = 0; + let a5 = 0; + let a6 = 0; + let a7 = 0; + + unsafe { + core::arch::asm!( + "ecall", + inlateout("a0") a0, + inlateout("a1") a1, + inlateout("a2") a2 => _, + inlateout("a3") a3 => _, + inlateout("a4") a4 => _, + inlateout("a5") a5 => _, + inlateout("a6") a6 => _, + inlateout("a7") a7 => _, + ) + }; + + let result = a0; + + if result == SyscallResult::Scalar2 as usize && a1 == knob as usize { + Ok(a2) + } else if result == SyscallResult::Scalar5 as usize && a1 == knob as usize { + Ok(a1) + } else if result == SyscallResult::Error as usize { + Err(a1.into()) + } else { + Err(Error::InternalError) + } +} diff --git a/library/std/src/os/xous/ffi/definitions.rs b/library/std/src/os/xous/ffi/definitions.rs new file mode 100644 index 0000000000000..345005bcc78d7 --- /dev/null +++ b/library/std/src/os/xous/ffi/definitions.rs @@ -0,0 +1,283 @@ +mod memoryflags; +pub(crate) use memoryflags::*; + +#[stable(feature = "rust1", since = "1.0.0")] +/// Indicates a particular syscall number as used by the Xous kernel. +#[derive(Copy, Clone)] +#[repr(usize)] +pub enum Syscall { + MapMemory = 2, + Yield = 3, + UpdateMemoryFlags = 12, + ReceiveMessage = 15, + SendMessage = 16, + Connect = 17, + CreateThread = 18, + UnmapMemory = 19, + ReturnMemory = 20, + TerminateProcess = 22, + TrySendMessage = 24, + TryConnect = 25, + GetThreadId = 32, + JoinThread = 36, + AdjustProcessLimit = 38, + ReturnScalar = 40, +} + +#[stable(feature = "rust1", since = "1.0.0")] +/// Copies of these invocation types here for when we're running +/// in environments without libxous. +#[derive(Copy, Clone)] +#[repr(usize)] +pub enum SyscallResult { + Ok = 0, + Error = 1, + MemoryRange = 3, + ConnectionId = 7, + Message = 9, + ThreadId = 10, + Scalar1 = 14, + Scalar2 = 15, + MemoryReturned = 18, + Scalar5 = 20, +} + +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Copy, Clone)] +/// A list of all known errors that may be returned by the Xous kernel.
+#[repr(usize)] +pub enum Error { + NoError = 0, + BadAlignment = 1, + BadAddress = 2, + OutOfMemory = 3, + MemoryInUse = 4, + InterruptNotFound = 5, + InterruptInUse = 6, + InvalidString = 7, + ServerExists = 8, + ServerNotFound = 9, + ProcessNotFound = 10, + ProcessNotChild = 11, + ProcessTerminated = 12, + Timeout = 13, + InternalError = 14, + ServerQueueFull = 15, + ThreadNotAvailable = 16, + UnhandledSyscall = 17, + InvalidSyscall = 18, + ShareViolation = 19, + InvalidThread = 20, + InvalidPid = 21, + UnknownError = 22, + AccessDenied = 23, + UseBeforeInit = 24, + DoubleFree = 25, + DebugInProgress = 26, + InvalidLimit = 27, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl From<usize> for Error { + fn from(src: usize) -> Self { + match src { + 0 => Self::NoError, + 1 => Self::BadAlignment, + 2 => Self::BadAddress, + 3 => Self::OutOfMemory, + 4 => Self::MemoryInUse, + 5 => Self::InterruptNotFound, + 6 => Self::InterruptInUse, + 7 => Self::InvalidString, + 8 => Self::ServerExists, + 9 => Self::ServerNotFound, + 10 => Self::ProcessNotFound, + 11 => Self::ProcessNotChild, + 12 => Self::ProcessTerminated, + 13 => Self::Timeout, + 14 => Self::InternalError, + 15 => Self::ServerQueueFull, + 16 => Self::ThreadNotAvailable, + 17 => Self::UnhandledSyscall, + 18 => Self::InvalidSyscall, + 19 => Self::ShareViolation, + 20 => Self::InvalidThread, + 21 => Self::InvalidPid, + 23 => Self::AccessDenied, + 24 => Self::UseBeforeInit, + 25 => Self::DoubleFree, + 26 => Self::DebugInProgress, + 27 => Self::InvalidLimit, + 22 | _ => Self::UnknownError, + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl From<i32> for Error { + fn from(src: i32) -> Self { + let Ok(src) = core::convert::TryInto::<usize>::try_into(src) else { + return Self::UnknownError; + }; + src.into() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "{}", + match self { + Error::NoError => "no error occurred", + Error::BadAlignment => "memory was not properly aligned", + Error::BadAddress => "an invalid address was supplied", + Error::OutOfMemory => "the process or service has run out of memory", + Error::MemoryInUse => "the requested address is in use", + Error::InterruptNotFound => + "the requested interrupt does not exist on this platform", + Error::InterruptInUse => "the requested interrupt is currently in use", + Error::InvalidString => "the specified string was not formatted correctly", + Error::ServerExists => "a server with that address already exists", + Error::ServerNotFound => "the requested server could not be found", + Error::ProcessNotFound => "the target process does not exist", + Error::ProcessNotChild => + "the requested operation can only be done on child processes", + Error::ProcessTerminated => "the target process has crashed", + Error::Timeout => "the requested operation timed out", + Error::InternalError => "an internal error occurred", + Error::ServerQueueFull => "the server has too many pending messages", + Error::ThreadNotAvailable => "the specified thread does not exist", + Error::UnhandledSyscall => "the kernel did not recognize that syscall", + Error::InvalidSyscall => "the syscall had incorrect parameters", + Error::ShareViolation => "an attempt was made to share memory twice", + Error::InvalidThread => "tried to resume a thread that was not ready", + Error::InvalidPid => "kernel attempted to use a pid that was not valid", + Error::AccessDenied => "no permission to perform
the requested operation", + Error::UseBeforeInit => "attempt to use a service before initialization finished", + Error::DoubleFree => "the requested resource was freed twice", + Error::DebugInProgress => "kernel attempted to activate a thread being debugged", + Error::InvalidLimit => "process attempted to adjust an invalid limit", + Error::UnknownError => "an unknown error occurred", + } + ) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::fmt::Debug for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl crate::error::Error for Error {} + +/// Indicates the type of Message that is sent when making a `SendMessage` syscall. +#[derive(Copy, Clone)] +#[repr(usize)] +pub(crate) enum InvokeType { + LendMut = 1, + Lend = 2, + Move = 3, + Scalar = 4, + BlockingScalar = 5, +} + +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug, Copy, Clone)] +/// A representation of a connection to a Xous service. +pub struct Connection(u32); + +#[stable(feature = "rust1", since = "1.0.0")] +impl From<u32> for Connection { + fn from(src: u32) -> Connection { + Connection(src) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl TryFrom<usize> for Connection { + type Error = core::num::TryFromIntError; + fn try_from(src: usize) -> Result<Connection, Self::Error> { + Ok(Connection(src.try_into()?)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Into<u32> for Connection { + fn into(self) -> u32 { + self.0 + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl TryInto<usize> for Connection { + type Error = core::num::TryFromIntError; + fn try_into(self) -> Result<usize, Self::Error> { + self.0.try_into() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug)] +pub enum ServerAddressError { + InvalidLength, +} + +#[stable(feature = "rust1", since = "1.0.0")] +pub struct ServerAddress([u32; 4]); + +#[stable(feature = "rust1", since = "1.0.0")] +impl TryFrom<&str> for ServerAddress { + type Error = ServerAddressError; + fn try_from(value: &str) -> Result<Self, Self::Error> { + let b = value.as_bytes(); + if b.len() == 0 || b.len() > 16 { + return Err(Self::Error::InvalidLength); + } + + let mut this_temp = [0u8; 16]; + for (dest, src) in this_temp.iter_mut().zip(b.iter()) { + *dest = *src; + } + + let mut this = [0u32; 4]; + for (dest, src) in this.iter_mut().zip(this_temp.chunks_exact(4)) { + *dest = u32::from_le_bytes(src.try_into().unwrap()); + } + Ok(ServerAddress(this)) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Into<[u32; 4]> for ServerAddress { + fn into(self) -> [u32; 4] { + self.0 + } +} + +#[derive(Debug, Copy, Clone)] +pub(crate) struct ThreadId(usize); + +impl From<usize> for ThreadId { + fn from(src: usize) -> ThreadId { + ThreadId(src) + } +} + +impl Into<usize> for ThreadId { + fn into(self) -> usize { + self.0 + } +} + +#[derive(Copy, Clone)] +#[repr(usize)] +/// Limits that can be passed to `AdjustLimit` +pub(crate) enum Limits { + HeapMaximum = 1, + HeapSize = 2, +} diff --git a/library/std/src/os/xous/ffi/definitions/memoryflags.rs b/library/std/src/os/xous/ffi/definitions/memoryflags.rs new file mode 100644 index 0000000000000..af9de3cbff29b --- /dev/null +++ b/library/std/src/os/xous/ffi/definitions/memoryflags.rs @@ -0,0 +1,176 @@ +/// Flags to be passed to the MapMemory struct. +/// Note that it is an error to have memory be +/// writable and not readable.
+#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash, Debug)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct MemoryFlags { + bits: usize, +} + +impl MemoryFlags { + /// Free this memory + #[stable(feature = "rust1", since = "1.0.0")] + pub const FREE: Self = Self { bits: 0b0000_0000 }; + + /// Immediately allocate this memory. Otherwise it will + /// be demand-paged. This is implicitly set when `phys` + /// is not 0. + #[stable(feature = "rust1", since = "1.0.0")] + pub const RESERVE: Self = Self { bits: 0b0000_0001 }; + + /// Allow the CPU to read from this page. + #[stable(feature = "rust1", since = "1.0.0")] + pub const R: Self = Self { bits: 0b0000_0010 }; + + /// Allow the CPU to write to this page. + #[stable(feature = "rust1", since = "1.0.0")] + pub const W: Self = Self { bits: 0b0000_0100 }; + + /// Allow the CPU to execute from this page. + #[stable(feature = "rust1", since = "1.0.0")] + pub const X: Self = Self { bits: 0b0000_1000 }; + + #[stable(feature = "rust1", since = "1.0.0")] + pub fn bits(&self) -> usize { + self.bits + } + + #[stable(feature = "rust1", since = "1.0.0")] + pub fn from_bits(raw: usize) -> Option<MemoryFlags> { + if raw > 16 { None } else { Some(MemoryFlags { bits: raw }) } + } + + #[stable(feature = "rust1", since = "1.0.0")] + pub fn is_empty(&self) -> bool { + self.bits == 0 + } + + #[stable(feature = "rust1", since = "1.0.0")] + pub fn empty() -> MemoryFlags { + MemoryFlags { bits: 0 } + } + + #[stable(feature = "rust1", since = "1.0.0")] + pub fn all() -> MemoryFlags { + MemoryFlags { bits: 15 } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::fmt::Binary for MemoryFlags { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Binary::fmt(&self.bits, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::fmt::Octal for MemoryFlags { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Octal::fmt(&self.bits, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::fmt::LowerHex for MemoryFlags { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::LowerHex::fmt(&self.bits, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::fmt::UpperHex for MemoryFlags { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::UpperHex::fmt(&self.bits, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::BitOr for MemoryFlags { + type Output = Self; + + /// Returns the union of the two sets of flags. + #[inline] + fn bitor(self, other: MemoryFlags) -> Self { + Self { bits: self.bits | other.bits } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::BitOrAssign for MemoryFlags { + /// Adds the set of flags. + #[inline] + fn bitor_assign(&mut self, other: Self) { + self.bits |= other.bits; + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::BitXor for MemoryFlags { + type Output = Self; + + /// Returns the left flags, but with all the right flags toggled. + #[inline] + fn bitxor(self, other: Self) -> Self { + Self { bits: self.bits ^ other.bits } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::BitXorAssign for MemoryFlags { + /// Toggles the set of flags.
+ #[inline] + fn bitxor_assign(&mut self, other: Self) { + self.bits ^= other.bits; + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::BitAnd for MemoryFlags { + type Output = Self; + + /// Returns the intersection between the two sets of flags. + #[inline] + fn bitand(self, other: Self) -> Self { + Self { bits: self.bits & other.bits } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::BitAndAssign for MemoryFlags { + /// Disables all flags disabled in the set. + #[inline] + fn bitand_assign(&mut self, other: Self) { + self.bits &= other.bits; + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::Sub for MemoryFlags { + type Output = Self; + + /// Returns the set difference of the two sets of flags. + #[inline] + fn sub(self, other: Self) -> Self { + Self { bits: self.bits & !other.bits } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::SubAssign for MemoryFlags { + /// Disables all flags enabled in the set. + #[inline] + fn sub_assign(&mut self, other: Self) { + self.bits &= !other.bits; + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl core::ops::Not for MemoryFlags { + type Output = Self; + + /// Returns the complement of this set of flags. + #[inline] + fn not(self) -> Self { + Self { bits: !self.bits } & MemoryFlags { bits: 15 } + } +} diff --git a/library/std/src/os/xous/mod.rs b/library/std/src/os/xous/mod.rs new file mode 100644 index 0000000000000..153694a89a78d --- /dev/null +++ b/library/std/src/os/xous/mod.rs @@ -0,0 +1,17 @@ +#![stable(feature = "rust1", since = "1.0.0")] +#![doc(cfg(target_os = "xous"))] + +pub mod ffi; + +#[stable(feature = "rust1", since = "1.0.0")] +pub mod services; + +/// A prelude for conveniently writing platform-specific code. +/// +/// Includes all extension traits, and some important type definitions. +#[stable(feature = "rust1", since = "1.0.0")] +pub mod prelude { + #[doc(no_inline)] + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::ffi::{OsStrExt, OsStringExt}; +} diff --git a/library/std/src/os/xous/services.rs b/library/std/src/os/xous/services.rs new file mode 100644 index 0000000000000..5c219f1fbb95e --- /dev/null +++ b/library/std/src/os/xous/services.rs @@ -0,0 +1,132 @@ +use crate::os::xous::ffi::Connection; +use core::sync::atomic::{AtomicU32, Ordering}; + +mod log; +pub(crate) use log::*; + +mod systime; +pub(crate) use systime::*; + +mod ticktimer; +pub(crate) use ticktimer::*; + +mod ns { + const NAME_MAX_LENGTH: usize = 64; + use crate::os::xous::ffi::{lend_mut, Connection}; + // By making this repr(C), the layout of this struct becomes well-defined + // and no longer shifts around. + // By marking it as `align(4096)` we define that it will be page-aligned, + // meaning it can be sent between processes. We make sure to pad out the + // entire struct so that memory isn't leaked to the name server. + #[repr(C, align(4096))] + struct ConnectRequest { + data: [u8; 4096], + } + + impl ConnectRequest { + pub fn new(name: &str) -> Self { + let mut cr = ConnectRequest { data: [0u8; 4096] }; + let name_bytes = name.as_bytes(); + + // Copy the string into our backing store. + for (&src_byte, dest_byte) in name_bytes.iter().zip(&mut cr.data[0..NAME_MAX_LENGTH]) { + *dest_byte = src_byte; + } + + // Set the string length to the length of the passed-in String, + // or the maximum possible length, whichever is smaller.
+ for (&src_byte, dest_byte) in (name.len().min(NAME_MAX_LENGTH) as u32) + .to_le_bytes() + .iter() + .zip(&mut cr.data[NAME_MAX_LENGTH..]) + { + *dest_byte = src_byte; + } + cr + } + } + + pub fn connect_with_name_impl(name: &str, blocking: bool) -> Option<Connection> { + let mut request = ConnectRequest::new(name); + let opcode = if blocking { + 6 /* BlockingConnect */ + } else { + 7 /* TryConnect */ + }; + let cid = if blocking { super::name_server() } else { super::try_name_server()? }; + + lend_mut(cid, opcode, &mut request.data, 0, name.len().min(NAME_MAX_LENGTH)) + .expect("unable to perform lookup"); + + // Read the result code back from the nameserver + let result = u32::from_le_bytes(request.data[0..4].try_into().unwrap()); + if result == 0 { + // If the result was successful, then the CID is stored in the next 4 bytes + Some(u32::from_le_bytes(request.data[4..8].try_into().unwrap()).into()) + } else { + None + } + } + + pub fn connect_with_name(name: &str) -> Option<Connection> { + connect_with_name_impl(name, true) + } + + pub fn try_connect_with_name(name: &str) -> Option<Connection> { + connect_with_name_impl(name, false) + } +} + +/// Attempt to connect to a server by name. If the server does not exist, this will +/// block until the server is created. +/// +/// Note that this is different from connecting to a server by address. Server +/// addresses are always 16 bytes long, whereas server names are arbitrary-length +/// strings up to 64 bytes in length. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn connect(name: &str) -> Option<Connection> { + ns::connect_with_name(name) +} + +/// Attempt to connect to a server by name. If the server does not exist, this will +/// immediately return `None`. +/// +/// Note that this is different from connecting to a server by address. Server +/// addresses are always 16 bytes long, whereas server names are arbitrary-length +/// strings. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn try_connect(name: &str) -> Option<Connection> { + ns::try_connect_with_name(name) +} + +static NAME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + +/// Return a `Connection` to the name server. If the name server has not been started, +/// then this call will block until the name server has been started. The `Connection` +/// will be shared among all threads in a process, so it is safe to call this +/// multiple times. +pub(crate) fn name_server() -> Connection { + let cid = NAME_SERVER_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return cid.into(); + } + + let cid = crate::os::xous::ffi::connect("xous-name-server".try_into().unwrap()).unwrap(); + NAME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed); + cid +} + +fn try_name_server() -> Option<Connection> { + let cid = NAME_SERVER_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return Some(cid.into()); + } + + if let Ok(Some(cid)) = crate::os::xous::ffi::try_connect("xous-name-server".try_into().unwrap()) + { + NAME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed); + Some(cid) + } else { + None + } +} diff --git a/library/std/src/os/xous/services/log.rs b/library/std/src/os/xous/services/log.rs new file mode 100644 index 0000000000000..e6bae929eac0e --- /dev/null +++ b/library/std/src/os/xous/services/log.rs @@ -0,0 +1,63 @@ +use crate::os::xous::ffi::Connection; +use core::sync::atomic::{AtomicU32, Ordering}; + +/// Group `usize` bytes into a `usize` and return it, beginning +/// from `offset` * sizeof(usize) bytes from the start.
For example, +/// `group_or_null([1,2,3,4,5,6,7,8], 1)` on a 32-bit system will +/// return a usize with 5678 packed into it. +fn group_or_null(data: &[u8], offset: usize) -> usize { + let start = offset * core::mem::size_of::<usize>(); + let mut out_array = [0u8; core::mem::size_of::<usize>()]; + if start < data.len() { + for (dest, src) in out_array.iter_mut().zip(&data[start..]) { + *dest = *src; + } + } + usize::from_le_bytes(out_array) +} + +pub(crate) enum LogScalar<'a> { + /// A panic occurred, and a panic log is forthcoming + BeginPanic, + + /// Some number of bytes will be appended to the log message + AppendPanicMessage(&'a [u8]), +} + +impl<'a> Into<[usize; 5]> for LogScalar<'a> { + fn into(self) -> [usize; 5] { + match self { + LogScalar::BeginPanic => [1000, 0, 0, 0, 0], + LogScalar::AppendPanicMessage(c) => + // Text is grouped into 4x `usize` words. The id is 1100 plus + // the number of characters in this message. + // Ignore errors since we're already panicking. + { + [ + 1100 + c.len(), + group_or_null(&c, 0), + group_or_null(&c, 1), + group_or_null(&c, 2), + group_or_null(&c, 3), + ] + } + } + } +} + +/// Return a `Connection` to the log server, which is used for printing messages to +/// the console and reporting panics. If the log server has not yet started, this +/// will block until the server is running. It is safe to call this multiple times, +/// because the address is shared among all threads in a process. +pub(crate) fn log_server() -> Connection { + static LOG_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + + let cid = LOG_SERVER_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return cid.into(); + } + + let cid = crate::os::xous::ffi::connect("xous-log-server ".try_into().unwrap()).unwrap(); + LOG_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed); + cid +} diff --git a/library/std/src/os/xous/services/systime.rs b/library/std/src/os/xous/services/systime.rs new file mode 100644 index 0000000000000..bbb875c69426e --- /dev/null +++ b/library/std/src/os/xous/services/systime.rs @@ -0,0 +1,28 @@ +use crate::os::xous::ffi::{connect, Connection}; +use core::sync::atomic::{AtomicU32, Ordering}; + +pub(crate) enum SystimeScalar { + GetUtcTimeMs, +} + +impl Into<[usize; 5]> for SystimeScalar { + fn into(self) -> [usize; 5] { + match self { + SystimeScalar::GetUtcTimeMs => [3, 0, 0, 0, 0], + } + } +} + +/// Return a `Connection` to the systime server. This server is used for reporting the +/// realtime clock.
+pub(crate) fn systime_server() -> Connection { + static SYSTIME_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + let cid = SYSTIME_SERVER_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return cid.into(); + } + + let cid = connect("timeserverpublic".try_into().unwrap()).unwrap(); + SYSTIME_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed); + cid +} diff --git a/library/std/src/os/xous/services/ticktimer.rs b/library/std/src/os/xous/services/ticktimer.rs new file mode 100644 index 0000000000000..7759303fdbe23 --- /dev/null +++ b/library/std/src/os/xous/services/ticktimer.rs @@ -0,0 +1,42 @@ +use crate::os::xous::ffi::Connection; +use core::sync::atomic::{AtomicU32, Ordering}; + +pub(crate) enum TicktimerScalar { + ElapsedMs, + SleepMs(usize), + LockMutex(usize /* cookie */), + UnlockMutex(usize /* cookie */), + WaitForCondition(usize /* cookie */, usize /* timeout (ms) */), + NotifyCondition(usize /* cookie */, usize /* count */), + FreeMutex(usize /* cookie */), + FreeCondition(usize /* cookie */), +} + +impl Into<[usize; 5]> for TicktimerScalar { + fn into(self) -> [usize; 5] { + match self { + TicktimerScalar::ElapsedMs => [0, 0, 0, 0, 0], + TicktimerScalar::SleepMs(msecs) => [1, msecs, 0, 0, 0], + TicktimerScalar::LockMutex(cookie) => [6, cookie, 0, 0, 0], + TicktimerScalar::UnlockMutex(cookie) => [7, cookie, 0, 0, 0], + TicktimerScalar::WaitForCondition(cookie, timeout_ms) => [8, cookie, timeout_ms, 0, 0], + TicktimerScalar::NotifyCondition(cookie, count) => [9, cookie, count, 0, 0], + TicktimerScalar::FreeMutex(cookie) => [10, cookie, 0, 0, 0], + TicktimerScalar::FreeCondition(cookie) => [11, cookie, 0, 0, 0], + } + } +} + +/// Return a `Connection` to the ticktimer server. This server is used for synchronization +/// primitives such as sleep, Mutex, and Condvar. +pub(crate) fn ticktimer_server() -> Connection { + static TICKTIMER_SERVER_CONNECTION: AtomicU32 = AtomicU32::new(0); + let cid = TICKTIMER_SERVER_CONNECTION.load(Ordering::Relaxed); + if cid != 0 { + return cid.into(); + } + + let cid = crate::os::xous::ffi::connect("ticktimer-server".try_into().unwrap()).unwrap(); + TICKTIMER_SERVER_CONNECTION.store(cid.into(), Ordering::Relaxed); + cid +} diff --git a/library/std/src/process.rs b/library/std/src/process.rs index 762a1e9b40860..5df1105e264ea 100644 --- a/library/std/src/process.rs +++ b/library/std/src/process.rs @@ -101,7 +101,7 @@ #![stable(feature = "process", since = "1.0.0")] #![deny(unsafe_op_in_unsafe_fn)] -#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))] +#[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx", target_os = "xous"))))] mod tests; use crate::io::prelude::*; diff --git a/library/std/src/sys/mod.rs b/library/std/src/sys/mod.rs index 63bd783746b1b..457eb782ccc66 100644 --- a/library/std/src/sys/mod.rs +++ b/library/std/src/sys/mod.rs @@ -44,6 +44,9 @@ cfg_if::cfg_if! 
{ } else if #[cfg(target_family = "wasm")] { mod wasm; pub use self::wasm::*; + } else if #[cfg(target_os = "xous")] { + mod xous; + pub use self::xous::*; } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] { mod sgx; pub use self::sgx::*; diff --git a/library/std/src/sys/xous/alloc.rs b/library/std/src/sys/xous/alloc.rs new file mode 100644 index 0000000000000..b3a3e691e0d0c --- /dev/null +++ b/library/std/src/sys/xous/alloc.rs @@ -0,0 +1,62 @@ +use crate::alloc::{GlobalAlloc, Layout, System}; + +static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new(); + +#[stable(feature = "alloc_system_type", since = "1.28.0")] +unsafe impl GlobalAlloc for System { + #[inline] + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access. + // Calling malloc() is safe because preconditions on this function match the trait method preconditions. + let _lock = lock::lock(); + unsafe { DLMALLOC.malloc(layout.size(), layout.align()) } + } + + #[inline] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access. + // Calling calloc() is safe because preconditions on this function match the trait method preconditions. + let _lock = lock::lock(); + unsafe { DLMALLOC.calloc(layout.size(), layout.align()) } + } + + #[inline] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access. + // Calling free() is safe because preconditions on this function match the trait method preconditions. + let _lock = lock::lock(); + unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) } + } + + #[inline] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access. + // Calling realloc() is safe because preconditions on this function match the trait method preconditions. + let _lock = lock::lock(); + unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) } + } +} + +mod lock { + use crate::sync::atomic::{AtomicI32, Ordering::SeqCst}; + + static LOCKED: AtomicI32 = AtomicI32::new(0); + + pub struct DropLock; + + pub fn lock() -> DropLock { + loop { + if LOCKED.swap(1, SeqCst) == 0 { + return DropLock; + } + crate::os::xous::ffi::do_yield(); + } + } + + impl Drop for DropLock { + fn drop(&mut self) { + let r = LOCKED.swap(0, SeqCst); + debug_assert_eq!(r, 1); + } + } +} diff --git a/library/std/src/sys/xous/locks/condvar.rs b/library/std/src/sys/xous/locks/condvar.rs new file mode 100644 index 0000000000000..1bb38dfa34154 --- /dev/null +++ b/library/std/src/sys/xous/locks/condvar.rs @@ -0,0 +1,111 @@ +use super::mutex::Mutex; +use crate::os::xous::ffi::{blocking_scalar, scalar}; +use crate::os::xous::services::ticktimer_server; +use crate::sync::Mutex as StdMutex; +use crate::time::Duration; + +// The implementation is inspired by Andrew D. 
Birrell's paper +// "Implementing Condition Variables with Semaphores" + +pub struct Condvar { + counter: StdMutex<usize>, +} + +unsafe impl Send for Condvar {} +unsafe impl Sync for Condvar {} + +impl Condvar { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Condvar { + Condvar { counter: StdMutex::new(0) } + } + + pub fn notify_one(&self) { + let mut counter = self.counter.lock().unwrap(); + if *counter <= 0 { + return; + } else { + *counter -= 1; + } + let result = blocking_scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), 1).into(), + ); + drop(counter); + result.expect("failure to send NotifyCondition command"); + } + + pub fn notify_all(&self) { + let mut counter = self.counter.lock().unwrap(); + if *counter <= 0 { + return; + } + let result = blocking_scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::NotifyCondition(self.index(), *counter) + .into(), + ); + *counter = 0; + drop(counter); + + result.expect("failure to send NotifyCondition command"); + } + + fn index(&self) -> usize { + self as *const Condvar as usize + } + + pub unsafe fn wait(&self, mutex: &Mutex) { + let mut counter = self.counter.lock().unwrap(); + *counter += 1; + unsafe { mutex.unlock() }; + drop(counter); + + let result = blocking_scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), 0).into(), + ); + unsafe { mutex.lock() }; + + result.expect("Ticktimer: failure to send WaitForCondition command"); + } + + pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool { + let mut counter = self.counter.lock().unwrap(); + *counter += 1; + unsafe { mutex.unlock() }; + drop(counter); + + let mut millis = dur.as_millis() as usize; + if millis == 0 { + millis = 1; + } + + let result = blocking_scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::WaitForCondition(self.index(), millis) + .into(), + ); + unsafe { mutex.lock() }; + + let result = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0; + + // If we awoke due to a timeout, decrement the wake count, as that would not have + // been done in the `notify()` call. + if !result { + *self.counter.lock().unwrap() -= 1; + } + result + } +} + +impl Drop for Condvar { + fn drop(&mut self) { + scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::FreeCondition(self.index()).into(), + ) + .ok(); + } +} diff --git a/library/std/src/sys/xous/locks/mod.rs b/library/std/src/sys/xous/locks/mod.rs new file mode 100644 index 0000000000000..f3c5c5d9fb0ce --- /dev/null +++ b/library/std/src/sys/xous/locks/mod.rs @@ -0,0 +1,7 @@ +mod condvar; +mod mutex; +mod rwlock; + +pub use condvar::*; +pub use mutex::*; +pub use rwlock::*; diff --git a/library/std/src/sys/xous/locks/mutex.rs b/library/std/src/sys/xous/locks/mutex.rs new file mode 100644 index 0000000000000..ea51776d54eca --- /dev/null +++ b/library/std/src/sys/xous/locks/mutex.rs @@ -0,0 +1,116 @@ +use crate::os::xous::ffi::{blocking_scalar, do_yield, scalar}; +use crate::os::xous::services::ticktimer_server; +use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst}; + +pub struct Mutex { + /// The "locked" value indicates how many threads are waiting on this + /// Mutex.
Possible values are: + /// 0: The lock is unlocked + /// 1: The lock is locked and uncontended + /// >=2: The lock is locked and contended + /// + /// A lock is "contended" when there is more than one thread waiting + /// for a lock, or it is locked for long periods of time. Rather than + /// spinning, these locks send a Message to the ticktimer server + /// requesting that they be woken up when a lock is unlocked. + locked: AtomicUsize, + + /// Whether this Mutex ever was contended, and therefore made a trip + /// to the ticktimer server. If this was never set, then we were never + /// on the slow path and can skip deregistering the mutex. + contended: AtomicBool, +} + +impl Mutex { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> Mutex { + Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) } + } + + fn index(&self) -> usize { + self as *const Mutex as usize + } + + #[inline] + pub unsafe fn lock(&self) { + // Try multiple times to acquire the lock without resorting to the ticktimer + // server. For locks that are held for a short amount of time, this will + // result in the ticktimer server never getting invoked. The `locked` value + // will be either 0 or 1. + for _attempts in 0..3 { + if unsafe { self.try_lock() } { + return; + } + do_yield(); + } + + // Try one more time to lock. If the lock is released between the previous code and + // here, then the inner `locked` value will be 1 at the end of this. If it was not + // locked, then the value will be more than 1, for example if there are multiple other + // threads waiting on this lock. + if unsafe { self.try_lock_or_poison() } { + return; + } + + // When this mutex is dropped, we will need to deregister it with the server. + self.contended.store(true, Relaxed); + + // The lock is now "contended". When the lock is released, a Message will get sent to the + // ticktimer server to wake it up. Note that this may already have happened, so the actual + // value of `lock` may be anything (0, 1, 2, ...). + blocking_scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(), + ) + .expect("failure to send LockMutex command"); + } + + #[inline] + pub unsafe fn unlock(&self) { + let prev = self.locked.fetch_sub(1, SeqCst); + + // If the previous value was 1, then this was a "fast path" unlock, so no + // need to involve the Ticktimer server + if prev == 1 { + return; + } + + // If it was 0, then something has gone seriously wrong and the counter + // has just wrapped around. + if prev == 0 { + panic!("mutex lock count underflowed"); + } + + // Unblock one thread that is waiting on this message. + scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::UnlockMutex(self.index()).into(), + ) + .expect("failure to send UnlockMutex command"); + } + + #[inline] + pub unsafe fn try_lock(&self) -> bool { + self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() + } + + #[inline] + pub unsafe fn try_lock_or_poison(&self) -> bool { + self.locked.fetch_add(1, SeqCst) == 0 + } +} + +impl Drop for Mutex { + fn drop(&mut self) { + // If there was Mutex contention, then we involved the ticktimer. Free + // the resources associated with this Mutex as it is deallocated. 
+ if self.contended.load(Relaxed) { + scalar( + ticktimer_server(), + crate::os::xous::services::TicktimerScalar::FreeMutex(self.index()).into(), + ) + .ok(); + } + } +} diff --git a/library/std/src/sys/xous/locks/rwlock.rs b/library/std/src/sys/xous/locks/rwlock.rs new file mode 100644 index 0000000000000..618da758adfa7 --- /dev/null +++ b/library/std/src/sys/xous/locks/rwlock.rs @@ -0,0 +1,72 @@ +use crate::os::xous::ffi::do_yield; +use crate::sync::atomic::{AtomicIsize, Ordering::SeqCst}; + +pub struct RwLock { + /// The "mode" value indicates how many threads are waiting on this + /// Mutex. Possible values are: + /// -1: The lock is locked for writing + /// 0: The lock is unlocked + /// >=1: The lock is locked for reading + /// + /// This currently spins waiting for the lock to be freed. An + /// optimization would be to involve the ticktimer server to + /// coordinate unlocks. + mode: AtomicIsize, +} + +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} + +impl RwLock { + #[inline] + #[rustc_const_stable(feature = "const_locks", since = "1.63.0")] + pub const fn new() -> RwLock { + RwLock { mode: AtomicIsize::new(0) } + } + + #[inline] + pub unsafe fn read(&self) { + while !unsafe { self.try_read() } { + do_yield(); + } + } + + #[inline] + pub unsafe fn try_read(&self) -> bool { + // Non-atomically determine the current value. + let current = self.mode.load(SeqCst); + + // If it's currently locked for writing, then we cannot read. + if current < 0 { + return false; + } + + // Attempt to lock. If the `current` value has changed, then this + // operation will fail and we will not obtain the lock even if we + // could potentially keep it. + let new = current + 1; + self.mode.compare_exchange(current, new, SeqCst, SeqCst).is_ok() + } + + #[inline] + pub unsafe fn write(&self) { + while !unsafe { self.try_write() } { + do_yield(); + } + } + + #[inline] + pub unsafe fn try_write(&self) -> bool { + self.mode.compare_exchange(0, -1, SeqCst, SeqCst).is_ok() + } + + #[inline] + pub unsafe fn read_unlock(&self) { + self.mode.fetch_sub(1, SeqCst); + } + + #[inline] + pub unsafe fn write_unlock(&self) { + assert_eq!(self.mode.compare_exchange(-1, 0, SeqCst, SeqCst), Ok(-1)); + } +} diff --git a/library/std/src/sys/xous/mod.rs b/library/std/src/sys/xous/mod.rs new file mode 100644 index 0000000000000..6d5c218d19570 --- /dev/null +++ b/library/std/src/sys/xous/mod.rs @@ -0,0 +1,37 @@ +#![deny(unsafe_op_in_unsafe_fn)] + +pub mod alloc; +#[path = "../unsupported/args.rs"] +pub mod args; +#[path = "../unix/cmath.rs"] +pub mod cmath; +#[path = "../unsupported/env.rs"] +pub mod env; +#[path = "../unsupported/fs.rs"] +pub mod fs; +#[path = "../unsupported/io.rs"] +pub mod io; +pub mod locks; +#[path = "../unsupported/net.rs"] +pub mod net; +#[path = "../unsupported/once.rs"] +pub mod once; +pub mod os; +#[path = "../unix/os_str.rs"] +pub mod os_str; +#[path = "../unix/path.rs"] +pub mod path; +#[path = "../unsupported/pipe.rs"] +pub mod pipe; +#[path = "../unsupported/process.rs"] +pub mod process; +pub mod stdio; +pub mod thread; +pub mod thread_local_key; +#[path = "../unsupported/thread_parking.rs"] +pub mod thread_parking; +pub mod time; + +#[path = "../unsupported/common.rs"] +mod common; +pub use common::*; diff --git a/library/std/src/sys/xous/os.rs b/library/std/src/sys/xous/os.rs new file mode 100644 index 0000000000000..3d19fa4b31af4 --- /dev/null +++ b/library/std/src/sys/xous/os.rs @@ -0,0 +1,147 @@ +use super::unsupported; +use crate::error::Error as StdError; +use 
crate::ffi::{OsStr, OsString}; +use crate::fmt; +use crate::io; +use crate::marker::PhantomData; +use crate::os::xous::ffi::Error as XousError; +use crate::path::{self, PathBuf}; + +#[cfg(not(test))] +mod c_compat { + use crate::os::xous::ffi::exit; + extern "C" { + fn main() -> u32; + } + + #[no_mangle] + pub extern "C" fn abort() { + exit(1); + } + + #[no_mangle] + pub extern "C" fn _start() { + exit(unsafe { main() }); + } + + // This function is needed by the panic runtime. The symbol is named in + // pre-link args for the target specification, so keep that in sync. + #[no_mangle] + // NB. used by both libunwind and libpanic_abort + pub extern "C" fn __rust_abort() -> ! { + exit(101); + } +} + +pub fn errno() -> i32 { + 0 +} + +pub fn error_string(errno: i32) -> String { + Into::::into(errno).to_string() +} + +pub fn getcwd() -> io::Result { + unsupported() +} + +pub fn chdir(_: &path::Path) -> io::Result<()> { + unsupported() +} + +pub struct SplitPaths<'a>(!, PhantomData<&'a ()>); + +pub fn split_paths(_unparsed: &OsStr) -> SplitPaths<'_> { + panic!("unsupported") +} + +impl<'a> Iterator for SplitPaths<'a> { + type Item = PathBuf; + fn next(&mut self) -> Option { + self.0 + } +} + +#[derive(Debug)] +pub struct JoinPathsError; + +pub fn join_paths(_paths: I) -> Result +where + I: Iterator, + T: AsRef, +{ + Err(JoinPathsError) +} + +impl fmt::Display for JoinPathsError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + "not supported on this platform yet".fmt(f) + } +} + +impl StdError for JoinPathsError { + #[allow(deprecated)] + fn description(&self) -> &str { + "not supported on this platform yet" + } +} + +pub fn current_exe() -> io::Result { + unsupported() +} + +pub struct Env(!); + +impl Env { + // FIXME(https://github.com/rust-lang/rust/issues/114583): Remove this when ::fmt matches ::fmt. + pub fn str_debug(&self) -> impl fmt::Debug + '_ { + let Self(inner) = self; + match *inner {} + } +} + +impl fmt::Debug for Env { + fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { + let Self(inner) = self; + match *inner {} + } +} + +impl Iterator for Env { + type Item = (OsString, OsString); + fn next(&mut self) -> Option<(OsString, OsString)> { + self.0 + } +} + +pub fn env() -> Env { + panic!("not supported on this platform") +} + +pub fn getenv(_: &OsStr) -> Option { + None +} + +pub fn setenv(_: &OsStr, _: &OsStr) -> io::Result<()> { + Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot set env vars on this platform")) +} + +pub fn unsetenv(_: &OsStr) -> io::Result<()> { + Err(io::const_io_error!(io::ErrorKind::Unsupported, "cannot unset env vars on this platform")) +} + +pub fn temp_dir() -> PathBuf { + panic!("no filesystem on this platform") +} + +pub fn home_dir() -> Option { + None +} + +pub fn exit(code: i32) -> ! 
{ + crate::os::xous::ffi::exit(code as u32); +} + +pub fn getpid() -> u32 { + panic!("no pids on this platform") +} diff --git a/library/std/src/sys/xous/stdio.rs b/library/std/src/sys/xous/stdio.rs new file mode 100644 index 0000000000000..2ac694641ba94 --- /dev/null +++ b/library/std/src/sys/xous/stdio.rs @@ -0,0 +1,131 @@ +use crate::io; + +pub struct Stdin; +pub struct Stdout {} +pub struct Stderr; + +use crate::os::xous::ffi::{lend, try_lend, try_scalar, Connection}; +use crate::os::xous::services::{log_server, try_connect, LogScalar}; + +impl Stdin { + pub const fn new() -> Stdin { + Stdin + } +} + +impl io::Read for Stdin { + fn read(&mut self, _buf: &mut [u8]) -> io::Result { + Ok(0) + } +} + +impl Stdout { + pub const fn new() -> Stdout { + Stdout {} + } +} + +impl io::Write for Stdout { + fn write(&mut self, buf: &[u8]) -> io::Result { + #[repr(align(4096))] + struct LendBuffer([u8; 4096]); + let mut lend_buffer = LendBuffer([0u8; 4096]); + let connection = log_server(); + for chunk in buf.chunks(lend_buffer.0.len()) { + for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) { + *dest = *src; + } + lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap(); + } + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Stderr { + pub const fn new() -> Stderr { + Stderr + } +} + +impl io::Write for Stderr { + fn write(&mut self, buf: &[u8]) -> io::Result { + #[repr(align(4096))] + struct LendBuffer([u8; 4096]); + let mut lend_buffer = LendBuffer([0u8; 4096]); + let connection = log_server(); + for chunk in buf.chunks(lend_buffer.0.len()) { + for (dest, src) in lend_buffer.0.iter_mut().zip(chunk) { + *dest = *src; + } + lend(connection, 1, &lend_buffer.0, 0, chunk.len()).unwrap(); + } + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub const STDIN_BUF_SIZE: usize = 0; + +pub fn is_ebadf(_err: &io::Error) -> bool { + true +} + +#[derive(Copy, Clone)] +pub struct PanicWriter { + log: Connection, + gfx: Option, +} + +impl io::Write for PanicWriter { + fn write(&mut self, s: &[u8]) -> core::result::Result { + for c in s.chunks(core::mem::size_of::() * 4) { + // Text is grouped into 4x `usize` words. The id is 1100 plus + // the number of characters in this message. + // Ignore errors since we're already panicking. + try_scalar(self.log, LogScalar::AppendPanicMessage(&c).into()).ok(); + } + + // Serialize the text to the graphics panic handler, only if we were able + // to acquire a connection to it. Text length is encoded in the `valid` field, + // the data itself in the buffer. Typically several messages are require to + // fully transmit the entire panic message. + if let Some(gfx) = self.gfx { + #[repr(C, align(4096))] + struct Request([u8; 4096]); + let mut request = Request([0u8; 4096]); + for (&s, d) in s.iter().zip(request.0.iter_mut()) { + *d = s; + } + try_lend(gfx, 0 /* AppendPanicText */, &request.0, 0, s.len()).ok(); + } + Ok(s.len()) + } + + // Tests show that this does not seem to be reliably called at the end of a panic + // print, so, we can't rely on this to e.g. trigger a graphics update. + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +pub fn panic_output() -> Option { + // Generally this won't fail because every server has already connected, so + // this is likely to succeed. + let log = log_server(); + + // Send the "We're panicking" message (1000). 
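// --- Editor's sketch (not part of this patch): the chunk-copy-lend pattern. ---
// The Stdout/Stderr writers above stage data through a 4096-byte, page-aligned
// scratch buffer and hand it to the log server one chunk at a time. `send_page`
// is a hypothetical stand-in for `lend(connection, 1, &page.0, 0, len)`.
#[repr(align(4096))]
struct Page([u8; 4096]);

fn write_chunked(buf: &[u8], mut send_page: impl FnMut(&[u8; 4096], usize)) {
    let mut page = Page([0u8; 4096]);
    for chunk in buf.chunks(page.0.len()) {
        // Only `chunk.len()` bytes of the page are meaningful; that length is
        // passed alongside the page so the receiver knows how much to print.
        page.0[..chunk.len()].copy_from_slice(chunk);
        send_page(&page.0, chunk.len());
    }
}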
+ try_scalar(log, LogScalar::BeginPanic.into()).ok(); + + // This is will fail in the case that the connection table is full, or if the + // graphics server is not running. Most servers do not already have this connection. + let gfx = try_connect("panic-to-screen!"); + + Some(PanicWriter { log, gfx }) +} diff --git a/library/std/src/sys/xous/thread.rs b/library/std/src/sys/xous/thread.rs new file mode 100644 index 0000000000000..78c68de7bf38d --- /dev/null +++ b/library/std/src/sys/xous/thread.rs @@ -0,0 +1,144 @@ +use crate::ffi::CStr; +use crate::io; +use crate::num::NonZeroUsize; +use crate::os::xous::ffi::{ + blocking_scalar, create_thread, do_yield, join_thread, map_memory, update_memory_flags, + MemoryFlags, Syscall, ThreadId, +}; +use crate::os::xous::services::{ticktimer_server, TicktimerScalar}; +use crate::time::Duration; +use core::arch::asm; + +pub struct Thread { + tid: ThreadId, +} + +pub const DEFAULT_MIN_STACK_SIZE: usize = 131072; +const MIN_STACK_SIZE: usize = 4096; +pub const GUARD_PAGE_SIZE: usize = 4096; + +impl Thread { + // unsafe: see thread::Builder::spawn_unchecked for safety requirements + pub unsafe fn new(stack: usize, p: Box) -> io::Result { + let p = Box::into_raw(Box::new(p)); + let mut stack_size = crate::cmp::max(stack, MIN_STACK_SIZE); + + if (stack_size & 4095) != 0 { + stack_size = (stack_size + 4095) & !4095; + } + + // Allocate the whole thing, then divide it up after the fact. This ensures that + // even if there's a context switch during this function, the whole stack plus + // guard pages will remain contiguous. + let stack_plus_guard_pages: &mut [u8] = unsafe { + map_memory( + None, + None, + GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE, + MemoryFlags::R | MemoryFlags::W | MemoryFlags::X, + ) + } + .map_err(|code| io::Error::from_raw_os_error(code as i32))?; + + // No access to this page. Note: Write-only pages are illegal, and will + // cause an access violation. + unsafe { + update_memory_flags(&mut stack_plus_guard_pages[0..GUARD_PAGE_SIZE], MemoryFlags::W) + .map_err(|code| io::Error::from_raw_os_error(code as i32))? + }; + + // No access to this page. Note: Write-only pages are illegal, and will + // cause an access violation. + unsafe { + update_memory_flags( + &mut stack_plus_guard_pages[(GUARD_PAGE_SIZE + stack_size)..], + MemoryFlags::W, + ) + .map_err(|code| io::Error::from_raw_os_error(code as i32))? + }; + + let guard_page_pre = stack_plus_guard_pages.as_ptr() as usize; + let tid = create_thread( + thread_start as *mut usize, + &mut stack_plus_guard_pages[GUARD_PAGE_SIZE..(stack_size + GUARD_PAGE_SIZE)], + p as usize, + guard_page_pre, + stack_size, + 0, + ) + .map_err(|code| io::Error::from_raw_os_error(code as i32))?; + + extern "C" fn thread_start(main: *mut usize, guard_page_pre: usize, stack_size: usize) { + unsafe { + // Finally, let's run some code. + Box::from_raw(main as *mut Box)(); + } + + // Destroy TLS, which will free the TLS page and call the destructor for + // any thread local storage. + unsafe { + crate::sys::thread_local_key::destroy_tls(); + } + + // Deallocate the stack memory, along with the guard pages. Afterwards, + // exit the thread by returning to the magic address 0xff80_3000usize, + // which tells the kernel to deallocate this thread. 
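// --- Editor's sketch (not part of this patch): stack sizing for map_memory(). ---
// The arithmetic used just below: round the requested stack up to a whole page,
// then allocate guard + stack + guard as a single contiguous mapping so a
// context switch cannot split the region.
const PAGE_SIZE: usize = 4096;

fn stack_mapping_len(requested: usize, min_stack: usize, guard: usize) -> (usize, usize) {
    let mut stack_size = requested.max(min_stack);
    if stack_size % PAGE_SIZE != 0 {
        // Equivalent to the `(stack_size + 4095) & !4095` rounding above.
        stack_size = (stack_size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
    }
    // Layout: [ guard page ][ stack_size bytes ][ guard page ]
    (stack_size, guard + stack_size + guard)
}
// For example, stack_mapping_len(131072, 4096, 4096) yields (131072, 139264):
// a 128 KiB stack bracketed by two 4 KiB guard pages.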
+ let mapped_memory_base = guard_page_pre; + let mapped_memory_length = GUARD_PAGE_SIZE + stack_size + GUARD_PAGE_SIZE; + unsafe { + asm!( + "ecall", + "ret", + in("a0") Syscall::UnmapMemory as usize, + in("a1") mapped_memory_base, + in("a2") mapped_memory_length, + in("ra") 0xff80_3000usize, + options(nomem, nostack, noreturn) + ); + } + } + + Ok(Thread { tid }) + } + + pub fn yield_now() { + do_yield(); + } + + pub fn set_name(_name: &CStr) { + // nope + } + + pub fn sleep(dur: Duration) { + // Because the sleep server works on units of `usized milliseconds`, split + // the messages up into these chunks. This means we may run into issues + // if you try to sleep a thread for more than 49 days on a 32-bit system. + let mut millis = dur.as_millis(); + while millis > 0 { + let sleep_duration = + if millis > (usize::MAX as _) { usize::MAX } else { millis as usize }; + blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into()) + .expect("failed to send message to ticktimer server"); + millis -= sleep_duration as u128; + } + } + + pub fn join(self) { + join_thread(self.tid).unwrap(); + } +} + +pub fn available_parallelism() -> io::Result { + // We're unicore right now. + Ok(unsafe { NonZeroUsize::new_unchecked(1) }) +} + +pub mod guard { + pub type Guard = !; + pub unsafe fn current() -> Option { + None + } + pub unsafe fn init() -> Option { + None + } +} diff --git a/library/std/src/sys/xous/thread_local_key.rs b/library/std/src/sys/xous/thread_local_key.rs new file mode 100644 index 0000000000000..3771ea6570084 --- /dev/null +++ b/library/std/src/sys/xous/thread_local_key.rs @@ -0,0 +1,190 @@ +use crate::mem::ManuallyDrop; +use crate::ptr; +use crate::sync::atomic::AtomicPtr; +use crate::sync::atomic::AtomicUsize; +use crate::sync::atomic::Ordering::SeqCst; +use core::arch::asm; + +use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags}; + +/// Thread Local Storage +/// +/// Currently, we are limited to 1023 TLS entries. The entries +/// live in a page of memory that's unique per-process, and is +/// stored in the `$tp` register. If this register is 0, then +/// TLS has not been initialized and thread cleanup can be skipped. +/// +/// The index into this register is the `key`. This key is identical +/// between all threads, but indexes a different offset within this +/// pointer. +pub type Key = usize; + +pub type Dtor = unsafe extern "C" fn(*mut u8); + +const TLS_MEMORY_SIZE: usize = 4096; + +/// TLS keys start at `1` to mimic POSIX. +static TLS_KEY_INDEX: AtomicUsize = AtomicUsize::new(1); + +fn tls_ptr_addr() -> *mut usize { + let mut tp: usize; + unsafe { + asm!( + "mv {}, tp", + out(reg) tp, + ); + } + core::ptr::from_exposed_addr_mut::(tp) +} + +/// Create an area of memory that's unique per thread. This area will +/// contain all thread local pointers. +fn tls_ptr() -> *mut usize { + let mut tp = tls_ptr_addr(); + + // If the TP register is `0`, then this thread hasn't initialized + // its TLS yet. Allocate a new page to store this memory. + if tp.is_null() { + tp = unsafe { + map_memory( + None, + None, + TLS_MEMORY_SIZE / core::mem::size_of::(), + MemoryFlags::R | MemoryFlags::W, + ) + } + .expect("Unable to allocate memory for thread local storage") + .as_mut_ptr(); + + unsafe { + // Key #0 is currently unused. + (tp).write_volatile(0); + + // Set the thread's `$tp` register + asm!( + "mv tp, {}", + in(reg) tp as usize, + ); + } + } + tp +} + +/// Allocate a new TLS key. These keys are shared among all threads. 
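// --- Editor's sketch (not part of this patch): splitting a sleep into chunks. ---
// `sleep` above converts the Duration to u128 milliseconds and issues one
// blocking SleepMs call per usize-sized chunk; as noted there, requests longer
// than usize::MAX milliseconds (~49 days on 32-bit) are spread across several
// messages. `sleep_ms` is a hypothetical stand-in for blocking_scalar(SleepMs).
use std::time::Duration;

fn sleep_in_chunks(dur: Duration, mut sleep_ms: impl FnMut(usize)) {
    let mut millis = dur.as_millis(); // u128; may exceed usize::MAX on 32-bit
    while millis > 0 {
        let chunk = if millis > usize::MAX as u128 { usize::MAX } else { millis as usize };
        sleep_ms(chunk);
        millis -= chunk as u128;
    }
}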
+fn tls_alloc() -> usize { + TLS_KEY_INDEX.fetch_add(1, SeqCst) +} + +#[inline] +pub unsafe fn create(dtor: Option) -> Key { + let key = tls_alloc(); + if let Some(f) = dtor { + unsafe { register_dtor(key, f) }; + } + key +} + +#[inline] +pub unsafe fn set(key: Key, value: *mut u8) { + assert!((key < 1022) && (key >= 1)); + unsafe { tls_ptr().add(key).write_volatile(value as usize) }; +} + +#[inline] +pub unsafe fn get(key: Key) -> *mut u8 { + assert!((key < 1022) && (key >= 1)); + core::ptr::from_exposed_addr_mut::(unsafe { tls_ptr().add(key).read_volatile() }) +} + +#[inline] +pub unsafe fn destroy(_key: Key) { + panic!("can't destroy keys on Xous"); +} + +// ------------------------------------------------------------------------- +// Dtor registration (stolen from Windows) +// +// Xous has no native support for running destructors so we manage our own +// list of destructors to keep track of how to destroy keys. We then install a +// callback later to get invoked whenever a thread exits, running all +// appropriate destructors. +// +// Currently unregistration from this list is not supported. A destructor can be +// registered but cannot be unregistered. There's various simplifying reasons +// for doing this, the big ones being: +// +// 1. Currently we don't even support deallocating TLS keys, so normal operation +// doesn't need to deallocate a destructor. +// 2. There is no point in time where we know we can unregister a destructor +// because it could always be getting run by some remote thread. +// +// Typically processes have a statically known set of TLS keys which is pretty +// small, and we'd want to keep this memory alive for the whole process anyway +// really. +// +// Perhaps one day we can fold the `Box` here into a static allocation, +// expanding the `StaticKey` structure to contain not only a slot for the TLS +// key but also a slot for the destructor queue on windows. An optimization for +// another day! + +static DTORS: AtomicPtr = AtomicPtr::new(ptr::null_mut()); + +struct Node { + dtor: Dtor, + key: Key, + next: *mut Node, +} + +unsafe fn register_dtor(key: Key, dtor: Dtor) { + let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() })); + + let mut head = DTORS.load(SeqCst); + loop { + node.next = head; + match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) { + Ok(_) => return, // nothing to drop, we successfully added the node to the list + Err(cur) => head = cur, + } + } +} + +pub unsafe fn destroy_tls() { + let tp = tls_ptr_addr(); + + // If the pointer address is 0, then this thread has no TLS. 
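// --- Editor's sketch (not part of this patch): the lock-free destructor list. ---
// register_dtor above prepends a leaked node to a singly linked list whose head
// lives in an AtomicPtr; when the compare_exchange loses a race, the observed
// head is retried. A standalone model of that push:
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

struct Node {
    key: usize,
    next: *mut Node,
}

static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

fn push(key: usize) {
    // The node is intentionally leaked: entries are never unregistered, so the
    // list only ever grows (see the rationale in the comment block above).
    let node = Box::into_raw(Box::new(Node { key, next: ptr::null_mut() }));
    let mut head = HEAD.load(SeqCst);
    loop {
        unsafe { (*node).next = head };
        match HEAD.compare_exchange(head, node, SeqCst, SeqCst) {
            Ok(_) => return,
            Err(current) => head = current,
        }
    }
}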
+ if tp.is_null() { + return; + } + unsafe { run_dtors() }; + + // Finally, free the TLS array + unsafe { + unmap_memory(core::slice::from_raw_parts_mut( + tp, + TLS_MEMORY_SIZE / core::mem::size_of::(), + )) + .unwrap() + }; +} + +unsafe fn run_dtors() { + let mut any_run = true; + for _ in 0..5 { + if !any_run { + break; + } + any_run = false; + let mut cur = DTORS.load(SeqCst); + while !cur.is_null() { + let ptr = unsafe { get((*cur).key) }; + + if !ptr.is_null() { + unsafe { set((*cur).key, ptr::null_mut()) }; + unsafe { ((*cur).dtor)(ptr as *mut _) }; + any_run = true; + } + + unsafe { cur = (*cur).next }; + } + } +} diff --git a/library/std/src/sys/xous/time.rs b/library/std/src/sys/xous/time.rs new file mode 100644 index 0000000000000..4e4ae67efffa2 --- /dev/null +++ b/library/std/src/sys/xous/time.rs @@ -0,0 +1,57 @@ +use crate::os::xous::ffi::blocking_scalar; +use crate::os::xous::services::{ + systime_server, ticktimer_server, SystimeScalar::GetUtcTimeMs, TicktimerScalar::ElapsedMs, +}; +use crate::time::Duration; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct Instant(Duration); + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +pub struct SystemTime(Duration); + +pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0)); + +impl Instant { + pub fn now() -> Instant { + let result = blocking_scalar(ticktimer_server(), ElapsedMs.into()) + .expect("failed to request elapsed_ms"); + let lower = result[0]; + let upper = result[1]; + Instant { 0: Duration::from_millis(lower as u64 | (upper as u64) << 32) } + } + + pub fn checked_sub_instant(&self, other: &Instant) -> Option { + self.0.checked_sub(other.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option { + self.0.checked_add(*other).map(Instant) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option { + self.0.checked_sub(*other).map(Instant) + } +} + +impl SystemTime { + pub fn now() -> SystemTime { + let result = blocking_scalar(systime_server(), GetUtcTimeMs.into()) + .expect("failed to request utc time in ms"); + let lower = result[0]; + let upper = result[1]; + SystemTime { 0: Duration::from_millis((upper as u64) << 32 | lower as u64) } + } + + pub fn sub_time(&self, other: &SystemTime) -> Result { + self.0.checked_sub(other.0).ok_or_else(|| other.0 - self.0) + } + + pub fn checked_add_duration(&self, other: &Duration) -> Option { + Some(SystemTime(self.0.checked_add(*other)?)) + } + + pub fn checked_sub_duration(&self, other: &Duration) -> Option { + Some(SystemTime(self.0.checked_sub(*other)?)) + } +} diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index e9c727cbbd122..f7d821750635d 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -46,6 +46,7 @@ cfg_if::cfg_if! { if #[cfg(any(target_os = "l4re", feature = "restricted-std", all(target_family = "wasm", not(target_os = "emscripten")), + target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx")))] { pub use crate::sys::net; } else { diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 50f1007e1ff51..46a62eed952d1 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -1640,7 +1640,10 @@ impl<'a> Builder<'a> { // flesh out rpath support more fully in the future. 
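// --- Editor's sketch (not part of this patch): reassembling 64-bit milliseconds. ---
// Instant::now and SystemTime::now above receive the millisecond count as two
// usize scalar results (low word first) and splice them back into a u64 before
// wrapping it in a Duration:
use std::time::Duration;

fn millis_from_words(lower: usize, upper: usize) -> Duration {
    let ms = (lower as u64) | ((upper as u64) << 32);
    Duration::from_millis(ms)
}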
rustflags.arg("-Zosx-rpath-install-name"); Some(format!("-Wl,-rpath,@loader_path/../{libdir}")) - } else if !target.contains("windows") && !target.contains("aix") { + } else if !target.contains("windows") + && !target.contains("aix") + && !target.contains("xous") + { rustflags.arg("-Clink-args=-Wl,-z,origin"); Some(format!("-Wl,-rpath,$ORIGIN/../{libdir}")) } else {