diff --git a/Cargo.lock b/Cargo.lock index e23d2f39a2f6..a9346db9e6d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3602,6 +3602,7 @@ dependencies = [ "wasmtime-environ", "wasmtime-fiber", "wasmtime-jit-debug", + "wasmtime-types", "winapi", ] diff --git a/cranelift/object/tests/basic.rs b/cranelift/object/tests/basic.rs index cf5eca52731e..508947b2e53e 100644 --- a/cranelift/object/tests/basic.rs +++ b/cranelift/object/tests/basic.rs @@ -200,7 +200,7 @@ fn libcall_function() { #[test] #[should_panic( - expected = "Result::unwrap()` on an `Err` value: Backend(Symbol \"function\\0with\\0nul\\0bytes\" has a null byte, which is disallowed" + expected = "Result::unwrap()` on an `Err` value: Backend(Symbol \"function\\u{0}with\\u{0}nul\\u{0}bytes\" has a null byte, which is disallowed" )] fn reject_nul_byte_symbol_for_func() { let flag_builder = settings::builder(); @@ -224,7 +224,7 @@ fn reject_nul_byte_symbol_for_func() { #[test] #[should_panic( - expected = "Result::unwrap()` on an `Err` value: Backend(Symbol \"data\\0with\\0nul\\0bytes\" has a null byte, which is disallowed" + expected = "Result::unwrap()` on an `Err` value: Backend(Symbol \"data\\u{0}with\\u{0}nul\\u{0}bytes\" has a null byte, which is disallowed" )] fn reject_nul_byte_symbol_for_data() { let flag_builder = settings::builder(); diff --git a/crates/cranelift/src/compiler.rs b/crates/cranelift/src/compiler.rs index fbe9c0168414..5a2f7d9c55b6 100644 --- a/crates/cranelift/src/compiler.rs +++ b/crates/cranelift/src/compiler.rs @@ -16,7 +16,7 @@ use cranelift_codegen::{MachSrcLoc, MachStackMap}; use cranelift_entity::{EntityRef, PrimaryMap}; use cranelift_frontend::FunctionBuilder; use cranelift_wasm::{ - DefinedFuncIndex, DefinedMemoryIndex, FuncIndex, FuncTranslator, MemoryIndex, SignatureIndex, + DefinedFuncIndex, FuncIndex, FuncTranslator, MemoryIndex, OwnedMemoryIndex, SignatureIndex, WasmFuncType, }; use object::write::Object; @@ -332,8 +332,9 @@ impl wasmtime_environ::Compiler for Compiler { let 
memory_offset = if ofs.num_imported_memories > 0 { ModuleMemoryOffset::Imported(ofs.vmctx_vmmemory_import(MemoryIndex::new(0))) } else if ofs.num_defined_memories > 0 { + // TODO shared? ModuleMemoryOffset::Defined( - ofs.vmctx_vmmemory_definition_base(DefinedMemoryIndex::new(0)), + ofs.vmctx_vmmemory_definition_base(OwnedMemoryIndex::new(0)), ) } else { ModuleMemoryOffset::None diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 90ff76c7a67d..1adb80f37555 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -1368,18 +1368,37 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult { let pointer_type = self.pointer_type(); - + let is_shared = self.module.memory_plans[index].memory.shared; let (ptr, base_offset, current_length_offset) = { let vmctx = self.vmctx(func); if let Some(def_index) = self.module.defined_memory_index(index) { - let base_offset = - i32::try_from(self.offsets.vmctx_vmmemory_definition_base(def_index)).unwrap(); - let current_length_offset = i32::try_from( - self.offsets - .vmctx_vmmemory_definition_current_length(def_index), - ) - .unwrap(); - (vmctx, base_offset, current_length_offset) + if is_shared { + // As with imported memory, the `VMMemoryDefinition` for a + // shared memory is stored elsewhere. We store a `*mut + // VMMemoryDefinition` to it and dereference that when + // atomically growing it. 
+ let from_offset = self.offsets.vmctx_vmmemory_pointer(def_index); + let memory = func.create_global_value(ir::GlobalValueData::Load { + base: vmctx, + offset: Offset32::new(i32::try_from(from_offset).unwrap()), + global_type: pointer_type, + readonly: true, + }); + let base_offset = i32::from(self.offsets.vmmemory_definition_base()); + let current_length_offset = + i32::from(self.offsets.vmmemory_definition_current_length()); + (memory, base_offset, current_length_offset) + } else { + let owned_index = self.module.owned_memory_index(def_index).expect("TODO"); + let owned_base_offset = + self.offsets.vmctx_vmmemory_definition_base(owned_index); + let owned_length_offset = self + .offsets + .vmctx_vmmemory_definition_current_length(owned_index); + let current_base_offset = i32::try_from(owned_base_offset).unwrap(); + let current_length_offset = i32::try_from(owned_length_offset).unwrap(); + (vmctx, current_base_offset, current_length_offset) + } } else { let from_offset = self.offsets.vmctx_vmmemory_import_from(index); let memory = func.create_global_value(ir::GlobalValueData::Load { @@ -1693,16 +1712,33 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m ) -> WasmResult { let pointer_type = self.pointer_type(); let vmctx = self.vmctx(&mut pos.func); + let is_shared = self.module.memory_plans[index].memory.shared; let base = pos.ins().global_value(pointer_type, vmctx); let current_length_in_bytes = match self.module.defined_memory_index(index) { Some(def_index) => { - let offset = i32::try_from( - self.offsets - .vmctx_vmmemory_definition_current_length(def_index), - ) - .unwrap(); - pos.ins() - .load(pointer_type, ir::MemFlags::trusted(), base, offset) + if is_shared { + let offset = + i32::try_from(self.offsets.vmctx_vmmemory_pointer(def_index)).unwrap(); + let vmmemory_ptr = + pos.ins() + .load(pointer_type, ir::MemFlags::trusted(), base, offset); + // TODO should be an atomic_load (need a way to to do atomic_load + offset). 
+ pos.ins().load( + pointer_type, + ir::MemFlags::trusted(), + vmmemory_ptr, + i32::from(self.offsets.vmmemory_definition_current_length()), + ) + } else { + let owned_index = self.module.owned_memory_index(def_index).expect("TODO"); + let offset = i32::try_from( + self.offsets + .vmctx_vmmemory_definition_current_length(owned_index), + ) + .unwrap(); + pos.ins() + .load(pointer_type, ir::MemFlags::trusted(), base, offset) + } } None => { let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap(); diff --git a/crates/environ/src/module.rs b/crates/environ/src/module.rs index 6b5b7ba91c3a..8b865d66aca9 100644 --- a/crates/environ/src/module.rs +++ b/crates/environ/src/module.rs @@ -10,7 +10,7 @@ use std::mem; use std::ops::Range; use wasmtime_types::*; -/// Implemenation styles for WebAssembly linear memory. +/// Implementation styles for WebAssembly linear memory. #[derive(Debug, Clone, Hash, Serialize, Deserialize)] pub enum MemoryStyle { /// The actual memory can be resized and moved. @@ -18,7 +18,7 @@ pub enum MemoryStyle { /// Extra space to reserve when a memory must be moved due to growth. reserve: u64, }, - /// Addresss space is allocated up front. + /// Address space is allocated up front. Static { /// The number of mapped and unmapped pages. bound: u64, @@ -160,7 +160,7 @@ pub enum MemoryInitialization { /// which might reside in a compiled module on disk, available immediately /// in a linear memory's address space. /// - /// To facilitate the latter fo these techniques the `try_static_init` + /// To facilitate the latter of these techniques the `try_static_init` /// function below, which creates this variant, takes a host page size /// argument which can page-align everything to make mmap-ing possible. Static { @@ -919,6 +919,25 @@ impl Module { } } + /// Convert a `DefinedMemoryIndex` into an `OwnedMemoryIndex`. Returns None + /// if the index is an imported memory. 
+ #[inline] + pub fn owned_memory_index(&self, memory: DefinedMemoryIndex) -> Option { + if memory.index() >= self.memory_plans.len() { + return None; + } + // Once we know that the memory index is not greater than the number of + // plans, we can iterate through the plans up to the memory index and + // count how many are not shared (i.e., owned). + let owned_memory_index = self + .memory_plans + .iter() + .take(memory.index()) + .filter(|(_, mp)| !mp.memory.shared) + .count(); + Some(OwnedMemoryIndex::new(owned_memory_index)) + } + /// Test whether the given memory index is for an imported memory. #[inline] pub fn is_imported_memory(&self, index: MemoryIndex) -> bool { diff --git a/crates/environ/src/module_environ.rs b/crates/environ/src/module_environ.rs index 0df931c3be6b..f68fdeb7bb17 100644 --- a/crates/environ/src/module_environ.rs +++ b/crates/environ/src/module_environ.rs @@ -240,7 +240,7 @@ impl<'a, 'data> ModuleEnvironment<'a, 'data> { EntityType::Function(sig_index) } TypeRef::Memory(ty) => { - if ty.shared { + if ty.shared && !self.validator.features().threads { return Err(WasmError::Unsupported("shared memories".to_owned())); } self.result.module.num_imported_memories += 1; @@ -296,7 +296,7 @@ impl<'a, 'data> ModuleEnvironment<'a, 'data> { for entry in memories { let memory = entry?; - if memory.shared { + if memory.shared && !self.validator.features().threads { return Err(WasmError::Unsupported("shared memories".to_owned())); } let plan = MemoryPlan::for_memory(memory.into(), &self.tunables); diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 36ff9c6c75e6..7c69760371f2 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -14,7 +14,8 @@ // imported_memories: [VMMemoryImport; module.num_imported_memories], // imported_globals: [VMGlobalImport; module.num_imported_globals], // tables: [VMTableDefinition; module.num_defined_tables], -// memories: [VMMemoryDefinition; 
module.num_defined_memories], +// memories: [*mut VMMemoryDefinition; module.num_defined_memories], +// owned_memories: [VMMemoryDefinition; module.num_owned_memories], // globals: [VMGlobalDefinition; module.num_defined_globals], // anyfuncs: [VMCallerCheckedAnyfunc; module.num_escaped_funcs], // } @@ -26,6 +27,7 @@ use crate::{ use cranelift_entity::packed_option::ReservedValue; use more_asserts::assert_lt; use std::convert::TryFrom; +use wasmtime_types::OwnedMemoryIndex; /// Sentinel value indicating that wasm has been interrupted. // Note that this has a bit of an odd definition. See the `insert_stack_check` @@ -67,6 +69,8 @@ pub struct VMOffsets

{ pub num_defined_tables: u32, /// The number of defined memories in the module. pub num_defined_memories: u32, + /// The number of memories owned by the module instance. + pub num_owned_memories: u32, /// The number of defined globals in the module. pub num_defined_globals: u32, /// The number of escaped functions in the module, the size of the anyfuncs @@ -86,6 +90,7 @@ pub struct VMOffsets

{ imported_globals: u32, defined_tables: u32, defined_memories: u32, + owned_memories: u32, defined_globals: u32, defined_anyfuncs: u32, size: u32, @@ -133,9 +138,11 @@ pub struct VMOffsetsFields

{ pub num_defined_tables: u32, /// The number of defined memories in the module. pub num_defined_memories: u32, + /// The number of memories owned by the module instance. + pub num_owned_memories: u32, /// The number of defined globals in the module. pub num_defined_globals: u32, - /// The numbe of escaped functions in the module, the size of the anyfunc + /// The number of escaped functions in the module, the size of the anyfunc /// array. pub num_escaped_funcs: u32, } @@ -143,6 +150,11 @@ pub struct VMOffsetsFields

{ impl VMOffsets

{ /// Return a new `VMOffsets` instance, for a given pointer size. pub fn new(ptr: P, module: &Module) -> Self { + let num_shared_memories = module + .memory_plans + .iter() + .filter(|p| p.1.memory.shared) + .count(); VMOffsets::from(VMOffsetsFields { ptr, num_imported_functions: cast_to_u32(module.num_imported_funcs), @@ -152,6 +164,7 @@ impl VMOffsets

{ num_defined_functions: cast_to_u32(module.functions.len()), num_defined_tables: cast_to_u32(module.table_plans.len()), num_defined_memories: cast_to_u32(module.memory_plans.len()), + num_owned_memories: cast_to_u32(module.memory_plans.len() - num_shared_memories), num_defined_globals: cast_to_u32(module.globals.len()), num_escaped_funcs: cast_to_u32(module.num_escaped_funcs), }) @@ -181,13 +194,14 @@ impl VMOffsets

{ num_defined_tables: _, num_defined_globals: _, num_defined_memories: _, + num_owned_memories: _, num_defined_functions: _, num_escaped_funcs: _, // used as the initial size below size, - // exhaustively match teh rest of the fields with input from + // exhaustively match the rest of the fields with input from // the macro $($name,)* } = *self; @@ -211,6 +225,7 @@ impl VMOffsets

{ defined_anyfuncs: "module functions", defined_globals: "defined globals", defined_memories: "defined memories", + owned_memories: "owned memories", defined_tables: "defined tables", imported_globals: "imported globals", imported_memories: "imported memories", @@ -237,6 +252,7 @@ impl From> for VMOffsets

{ num_defined_functions: fields.num_defined_functions, num_defined_tables: fields.num_defined_tables, num_defined_memories: fields.num_defined_memories, + num_owned_memories: fields.num_owned_memories, num_defined_globals: fields.num_defined_globals, num_escaped_funcs: fields.num_escaped_funcs, runtime_limits: 0, @@ -251,6 +267,7 @@ impl From> for VMOffsets

{ imported_globals: 0, defined_tables: 0, defined_memories: 0, + owned_memories: 0, defined_globals: 0, defined_anyfuncs: 0, size: 0, @@ -303,7 +320,9 @@ impl From> for VMOffsets

{ size(defined_tables) = cmul(ret.num_defined_tables, ret.size_of_vmtable_definition()), size(defined_memories) - = cmul(ret.num_defined_memories, ret.size_of_vmmemory_definition()), + = cmul(ret.num_defined_memories, ret.size_of_vmmemory_pointer()), + size(owned_memories) + = cmul(ret.num_owned_memories, ret.size_of_vmmemory_definition()), align(16), size(defined_globals) = cmul(ret.num_defined_globals, ret.size_of_vmglobal_definition()), @@ -445,6 +464,12 @@ impl VMOffsets

{ pub fn size_of_vmmemory_definition(&self) -> u8 { 2 * self.pointer_size() } + + /// Return the size of `*mut VMMemoryDefinition`. + #[inline] + pub fn size_of_vmmemory_pointer(&self) -> u8 { + self.pointer_size() + } } /// Offsets for `VMGlobalImport`. @@ -604,6 +629,12 @@ impl VMOffsets

{ self.defined_memories } + /// The offset of the `owned_memories` array. + #[inline] + pub fn vmctx_owned_memories_begin(&self) -> u32 { + self.owned_memories + } + /// The offset of the `globals` array. #[inline] pub fn vmctx_globals_begin(&self) -> u32 { @@ -667,11 +698,19 @@ impl VMOffsets

{ self.vmctx_tables_begin() + index.as_u32() * u32::from(self.size_of_vmtable_definition()) } - /// Return the offset to `VMMemoryDefinition` index `index`. + /// Return the offset to the `*mut VMMemoryDefinition` at index `index`. #[inline] - pub fn vmctx_vmmemory_definition(&self, index: DefinedMemoryIndex) -> u32 { + pub fn vmctx_vmmemory_pointer(&self, index: DefinedMemoryIndex) -> u32 { assert_lt!(index.as_u32(), self.num_defined_memories); - self.vmctx_memories_begin() + index.as_u32() * u32::from(self.size_of_vmmemory_definition()) + self.vmctx_memories_begin() + index.as_u32() * u32::from(self.size_of_vmmemory_pointer()) + } + + /// Return the offset to the owned `VMMemoryDefinition` at index `index`. + #[inline] + pub fn vmctx_vmmemory_definition(&self, index: OwnedMemoryIndex) -> u32 { + assert_lt!(index.as_u32(), self.num_owned_memories); + self.vmctx_owned_memories_begin() + + index.as_u32() * u32::from(self.size_of_vmmemory_definition()) } /// Return the offset to the `VMGlobalDefinition` index `index`. @@ -735,13 +774,13 @@ impl VMOffsets

{ /// Return the offset to the `base` field in `VMMemoryDefinition` index `index`. #[inline] - pub fn vmctx_vmmemory_definition_base(&self, index: DefinedMemoryIndex) -> u32 { + pub fn vmctx_vmmemory_definition_base(&self, index: OwnedMemoryIndex) -> u32 { self.vmctx_vmmemory_definition(index) + u32::from(self.vmmemory_definition_base()) } /// Return the offset to the `current_length` field in `VMMemoryDefinition` index `index`. #[inline] - pub fn vmctx_vmmemory_definition_current_length(&self, index: DefinedMemoryIndex) -> u32 { + pub fn vmctx_vmmemory_definition_current_length(&self, index: OwnedMemoryIndex) -> u32 { self.vmctx_vmmemory_definition(index) + u32::from(self.vmmemory_definition_current_length()) } diff --git a/crates/runtime/Cargo.toml b/crates/runtime/Cargo.toml index 84d355eb699d..282187d823c2 100644 --- a/crates/runtime/Cargo.toml +++ b/crates/runtime/Cargo.toml @@ -14,6 +14,7 @@ edition = "2021" wasmtime-environ = { path = "../environ", version = "=0.38.0" } wasmtime-fiber = { path = "../fiber", version = "=0.38.0", optional = true } wasmtime-jit-debug = { path = "../jit-debug", version = "=0.38.0", features = ["gdb_jit_int"] } +wasmtime-types = { path = "../types", version = "=0.38.0" } region = "2.1.0" libc = { version = "0.2.112", default-features = false } log = "0.4.8" diff --git a/crates/runtime/src/externref.rs b/crates/runtime/src/externref.rs index 7b5b3ef06d66..91447a8019e3 100644 --- a/crates/runtime/src/externref.rs +++ b/crates/runtime/src/externref.rs @@ -1048,6 +1048,7 @@ mod tests { num_defined_functions: 0, num_defined_tables: 0, num_defined_memories: 0, + num_owned_memories: 0, num_defined_globals: 0, num_escaped_funcs: 0, }); @@ -1075,6 +1076,7 @@ mod tests { num_defined_functions: 0, num_defined_tables: 0, num_defined_memories: 0, + num_owned_memories: 0, num_defined_globals: 0, num_escaped_funcs: 0, }); @@ -1102,6 +1104,7 @@ mod tests { num_defined_functions: 0, num_defined_tables: 0, num_defined_memories: 0, + 
num_owned_memories: 0, num_defined_globals: 0, num_escaped_funcs: 0, }); diff --git a/crates/runtime/src/instance.rs b/crates/runtime/src/instance.rs index b08af11a17ec..5b29bf86ff55 100644 --- a/crates/runtime/src/instance.rs +++ b/crates/runtime/src/instance.rs @@ -211,7 +211,7 @@ impl Instance { /// Return the indexed `VMMemoryDefinition`. fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition { - unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_definition(index)) } + unsafe { *self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_pointer(index)) } } /// Return the indexed `VMGlobalDefinition`. @@ -933,10 +933,28 @@ impl Instance { ptr = ptr.add(1); } - // Initialize the defined memories + // Initialize the defined memories. This fills in both the + // `defined_memories` table and the `owned_memories` table at the same + // time. Entries in `defined_memories` hold a pointer to a definition + // (all memories) whereas the `owned_memories` hold the actual + // definitions of memories owned (not shared) in the module. let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_memories_begin()); + let mut owned_ptr = self.vmctx_plus_offset(self.offsets.vmctx_owned_memories_begin()); for i in 0..module.memory_plans.len() - module.num_imported_memories { - ptr::write(ptr, self.memories[DefinedMemoryIndex::new(i)].vmmemory()); + if module.memory_plans[MemoryIndex::new(i)].memory.shared { + let def_ptr = self.memories[DefinedMemoryIndex::new(i)] + .as_shared_memory() + .unwrap() + .vmmemory_ptr_mut(); + ptr::write(ptr, def_ptr); + } else { + ptr::write( + owned_ptr, + self.memories[DefinedMemoryIndex::new(i)].vmmemory(), + ); + ptr::write(ptr, owned_ptr); + owned_ptr = owned_ptr.add(1); + } ptr = ptr.add(1); } @@ -1103,9 +1121,9 @@ impl InstanceHandle { } /// Return the memory index for the given `VMMemoryDefinition` in this instance. 
- pub unsafe fn memory_index(&self, memory: &VMMemoryDefinition) -> DefinedMemoryIndex { - self.instance().memory_index(memory) - } + // pub unsafe fn memory_index(&self, memory: &VMMemoryDefinition) -> DefinedMemoryIndex { + // self.instance().memory_index(memory) + // } /// Get a memory defined locally within this module. pub fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory { diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index 3e45687d6104..0e8aeda86af7 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -513,6 +513,38 @@ impl Default for OnDemandInstanceAllocator { } } +/// Allocate an instance containing a single memory. +/// +/// In order to import a [`Memory`] into a WebAssembly instance, Wasmtime +/// requires that memory to exist in its own instance. Here we bring to life +/// such a "Frankenstein" instance with the only purpose of exporting a +/// [`Memory`]. +/// +/// TODO explain how this applies to shared memory +pub unsafe fn allocate_single_memory_instance( + req: InstanceAllocationRequest, + memory: Memory, +) -> Result { + let mut memories = PrimaryMap::default(); + memories.push(memory); + let tables = PrimaryMap::default(); + let module = req.runtime_info.module(); + let offsets = VMOffsets::new(HostPtr, module); + let layout = Instance::alloc_layout(&offsets); + let instance = alloc::alloc(layout) as *mut Instance; + Instance::new_at(instance, layout.size(), offsets, req, memories, tables); + Ok(InstanceHandle { instance }) +} + +/// Internal implementation of [`InstanceHandle`] deallocation. +/// +/// See [`InstanceAllocator::deallocate()`] for more details. 
+pub unsafe fn deallocate(handle: &InstanceHandle) { + let layout = Instance::alloc_layout(&handle.instance().offsets); + ptr::drop_in_place(handle.instance); + alloc::dealloc(handle.instance.cast(), layout); +} + unsafe impl InstanceAllocator for OnDemandInstanceAllocator { unsafe fn allocate( &self, @@ -542,9 +574,7 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator { } unsafe fn deallocate(&self, handle: &InstanceHandle) { - let layout = Instance::alloc_layout(&handle.instance().offsets); - ptr::drop_in_place(handle.instance); - alloc::dealloc(handle.instance.cast(), layout); + deallocate(handle) } #[cfg(feature = "async")] diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index d7d7d0ec92e2..f5e41ad2e012 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -50,12 +50,14 @@ pub use crate::export::*; pub use crate::externref::*; pub use crate::imports::Imports; pub use crate::instance::{ - InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstantiationError, LinkError, - OnDemandInstanceAllocator, StorePtr, + allocate_single_memory_instance, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, + InstantiationError, LinkError, OnDemandInstanceAllocator, StorePtr, }; #[cfg(feature = "pooling-allocator")] pub use crate::instance::{InstanceLimits, PoolingAllocationStrategy, PoolingInstanceAllocator}; -pub use crate::memory::{DefaultMemoryCreator, Memory, RuntimeLinearMemory, RuntimeMemoryCreator}; +pub use crate::memory::{ + DefaultMemoryCreator, Memory, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, +}; pub use crate::mmap::Mmap; pub use crate::mmap_vec::MmapVec; pub use crate::table::{Table, TableElement}; diff --git a/crates/runtime/src/memory.rs b/crates/runtime/src/memory.rs index 0d402dad4e25..2c4fa1c31fae 100644 --- a/crates/runtime/src/memory.rs +++ b/crates/runtime/src/memory.rs @@ -11,8 +11,8 @@ use anyhow::Error; use anyhow::{bail, format_err, Result}; use 
more_asserts::{assert_ge, assert_le}; use std::convert::TryFrom; -use std::sync::Arc; -use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM32_MAX_PAGES, WASM64_MAX_PAGES}; +use std::sync::{Arc, RwLock}; +use wasmtime_environ::{MemoryPlan, MemoryStyle, Tunables, WASM32_MAX_PAGES, WASM64_MAX_PAGES}; const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize; const WASM_PAGE_SIZE_U64: u64 = wasmtime_environ::WASM_PAGE_SIZE as u64; @@ -60,6 +60,63 @@ pub trait RuntimeLinearMemory: Send + Sync { /// Returns `None` if the memory is unbounded. fn maximum_byte_size(&self) -> Option; + /// Grows a memory by `delta_pages`. + /// + /// This performs the necessary checks on the growth before delegating to + /// the underlying `grow_to` implementation. A default implementation of + /// this memory is provided here since this is assumed to be the same for + /// most kinds of memory; one exception is shared memory, which must perform + /// all the steps of the default implementation *plus* the required locking. + /// + /// The `store` is used only for error reporting. + fn grow(&mut self, delta_pages: u64, store: &mut dyn Store) -> Result, Error> { + let old_byte_size = self.byte_size(); + + // Wasm spec: when growing by 0 pages, always return the current size. + if delta_pages == 0 { + return Ok(Some(old_byte_size)); + } + + // The largest wasm-page-aligned region of memory is possible to + // represent in a `usize`. This will be impossible for the system to + // actually allocate. + let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE); + + // Calculate the byte size of the new allocation. Let it overflow up to + // `usize::MAX`, then clamp it down to `absolute_max`. 
+ let new_byte_size = usize::try_from(delta_pages) + .unwrap_or(usize::MAX) + .saturating_mul(WASM_PAGE_SIZE) + .saturating_add(old_byte_size); + let new_byte_size = if new_byte_size > absolute_max { + absolute_max + } else { + new_byte_size + }; + + let maximum = self.maximum_byte_size(); + // Store limiter gets first chance to reject memory_growing. + if !store.memory_growing(old_byte_size, new_byte_size, maximum)? { + return Ok(None); + } + + // Never exceed maximum, even if limiter permitted it. + if let Some(max) = maximum { + if new_byte_size > max { + store.memory_grow_failed(&format_err!("Memory maximum size exceeded")); + return Ok(None); + } + } + + match self.grow_to(new_byte_size) { + Ok(_) => Ok(Some(old_byte_size)), + Err(e) => { + store.memory_grow_failed(&e); + Ok(None) + } + } + } + /// Grow memory to the specified amount of bytes. /// /// Returns an error if memory can't be grown by the specified amount @@ -77,7 +134,6 @@ pub trait RuntimeLinearMemory: Send + Sync { /// For the pooling allocator, we must be able to downcast this trait to its /// underlying structure. - #[cfg(feature = "pooling-allocator")] fn as_any_mut(&mut self) -> &mut dyn std::any::Any; } @@ -115,6 +171,7 @@ pub struct MmapMemory { impl MmapMemory { /// Create a new linear memory instance with specified minimum and maximum number of wasm pages. + /// TODO remove minimum/maximum; already contained within `plan`. 
pub fn new( plan: &MemoryPlan, minimum: usize, @@ -145,13 +202,14 @@ impl MmapMemory { (bound_bytes, 0) } }; + let request_bytes = pre_guard_bytes .checked_add(alloc_bytes) .and_then(|i| i.checked_add(extra_to_reserve_on_growth)) .and_then(|i| i.checked_add(offset_guard_bytes)) .ok_or_else(|| format_err!("cannot allocate {} with guard regions", minimum))?; - let mut mmap = Mmap::accessible_reserved(0, request_bytes)?; + if minimum > 0 { mmap.make_accessible(pre_guard_bytes, minimum)?; } @@ -260,7 +318,6 @@ impl RuntimeLinearMemory for MmapMemory { self.memory_image.is_none() } - #[cfg(feature = "pooling-allocator")] fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } @@ -268,7 +325,7 @@ impl RuntimeLinearMemory for MmapMemory { /// A "static" memory where the lifetime of the backing memory is managed /// elsewhere. Currently used with the pooling allocator. -struct ExternalMemory { +struct StaticMemory { /// The memory in the host for this wasm memory. The length of this /// slice is the maximum size of the memory that can be grown to. base: &'static mut [u8], @@ -286,7 +343,7 @@ struct ExternalMemory { memory_image: Option, } -impl ExternalMemory { +impl StaticMemory { fn new( base: &'static mut [u8], initial_size: usize, @@ -324,7 +381,7 @@ impl ExternalMemory { } } -impl RuntimeLinearMemory for ExternalMemory { +impl RuntimeLinearMemory for StaticMemory { fn byte_size(&self) -> usize { self.size } @@ -374,7 +431,100 @@ impl RuntimeLinearMemory for ExternalMemory { } } - #[cfg(feature = "pooling-allocator")] + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } +} + +/// For shared memory (and only for shared memory), this lock-version restricts +/// access when growing the memory or checking its size. This is to conform with +/// the [thread proposal]: "When `IsSharedArrayBuffer(...)` is true, the return +/// value should be the result of an atomic read-modify-write of the new size to +/// the internal `length` slot." 
+/// +/// [thread proposal]: +/// https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md#webassemblymemoryprototypegrow +#[derive(Clone)] +pub struct SharedMemory(Arc); +impl SharedMemory { + /// Construct a new [`SharedMemory`]. + pub fn new(ty: wasmtime_types::Memory, tunables: &Tunables) -> Result { + assert!(ty.shared); + let plan = MemoryPlan::for_memory(ty, tunables); + let (minimum_bytes, maximum_bytes) = Memory::limit_new(&plan, None)?; + let mut mmap_memory = MmapMemory::new(&plan, minimum_bytes, maximum_bytes, None)?; + let def = LongTermVMMemoryDefinition(mmap_memory.vmmemory()); + let memory: RwLock> = RwLock::new(Box::new(mmap_memory)); + Ok(Self(Arc::new(SharedMemoryInner { memory, ty, def }))) + } + + /// TODO + pub fn wrap(mut memory: Box, ty: wasmtime_types::Memory) -> Self { + let def = LongTermVMMemoryDefinition(memory.vmmemory()); + Self(Arc::new(SharedMemoryInner { + memory: RwLock::new(memory), + ty, + def, + })) + } + + /// Return the memory type for this [`SharedMemory`]. + pub fn ty(&self) -> wasmtime_types::Memory { + self.0.ty + } + + /// Convert this shared memory into a [`Memory`]. + pub fn as_memory(self) -> Memory { + Memory(Box::new(self)) + } + + /// Return a mutable pointer to the shared memory's [VMMemoryDefinition]. + pub fn vmmemory_ptr_mut(&mut self) -> *mut VMMemoryDefinition { + &self.0.def.0 as *const _ as *mut _ + } + + /// Return a pointer to the shared memory's [VMMemoryDefinition]. + pub fn vmmemory_ptr(&self) -> *const VMMemoryDefinition { + &self.0.def.0 as *const _ + } +} + +struct SharedMemoryInner { + memory: RwLock>, + ty: wasmtime_types::Memory, + def: LongTermVMMemoryDefinition, +} + +struct LongTermVMMemoryDefinition(VMMemoryDefinition); +unsafe impl Send for LongTermVMMemoryDefinition {} +unsafe impl Sync for LongTermVMMemoryDefinition {} + +/// Proxy all calls through the [`RwLock`]. 
+impl RuntimeLinearMemory for SharedMemory { + fn byte_size(&self) -> usize { + self.0.memory.read().unwrap().byte_size() + } + + fn maximum_byte_size(&self) -> Option { + self.0.memory.read().unwrap().maximum_byte_size() + } + + fn grow(&mut self, delta_pages: u64, store: &mut dyn Store) -> Result, Error> { + self.0.memory.write().unwrap().grow(delta_pages, store) + } + + fn grow_to(&mut self, size: usize) -> Result<()> { + self.0.memory.write().unwrap().grow_to(size) + } + + fn vmmemory(&mut self) -> VMMemoryDefinition { + self.0.memory.write().unwrap().vmmemory() + } + + fn needs_init(&self) -> bool { + self.0.memory.read().unwrap().needs_init() + } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } @@ -391,13 +541,14 @@ impl Memory { store: &mut dyn Store, memory_image: Option<&Arc>, ) -> Result { - let (minimum, maximum) = Self::limit_new(plan, store)?; - Ok(Memory(creator.new_memory( - plan, - minimum, - maximum, - memory_image, - )?)) + let (minimum, maximum) = Self::limit_new(plan, Some(store))?; + let allocation = creator.new_memory(plan, minimum, maximum, memory_image)?; + let allocation = if plan.memory.shared { + Box::new(SharedMemory::wrap(allocation, plan.memory)) + } else { + allocation + }; + Ok(Memory(allocation)) } /// Create a new static (immovable) memory instance for the specified plan. 
@@ -408,17 +559,26 @@ impl Memory { memory_image: Option, store: &mut dyn Store, ) -> Result { - let (minimum, maximum) = Self::limit_new(plan, store)?; + let (minimum, maximum) = Self::limit_new(plan, Some(store))?; let pooled_memory = - ExternalMemory::new(base, minimum, maximum, make_accessible, memory_image)?; - Ok(Memory(Box::new(pooled_memory))) + StaticMemory::new(base, minimum, maximum, make_accessible, memory_image)?; + let allocation = Box::new(pooled_memory); + let allocation: Box = if plan.memory.shared { + Box::new(SharedMemory::wrap(allocation, plan.memory)) + } else { + allocation + }; + Ok(Memory(allocation)) } /// Calls the `store`'s limiter to optionally prevent a memory from being allocated. /// /// Returns the minimum size and optional maximum size of the memory, in /// bytes. - fn limit_new(plan: &MemoryPlan, store: &mut dyn Store) -> Result<(usize, Option)> { + fn limit_new( + plan: &MemoryPlan, + store: Option<&mut dyn Store>, + ) -> Result<(usize, Option)> { // Sanity-check what should already be true from wasm module validation. let absolute_max = if plan.memory.memory64 { WASM64_MAX_PAGES @@ -479,11 +639,13 @@ impl Memory { // calculation overflowed. This means that the `minimum` we're informing // the limiter is lossy and may not be 100% accurate, but for now the // expected uses of limiter means that's ok. - if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? { - bail!( - "memory minimum size of {} pages exceeds memory limits", - plan.memory.minimum - ); + if let Some(store) = store { + if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? 
{ + bail!( + "memory minimum size of {} pages exceeds memory limits", + plan.memory.minimum + ); + } } // At this point we need to actually handle overflows, so bail out with @@ -541,50 +703,7 @@ impl Memory { delta_pages: u64, store: &mut dyn Store, ) -> Result, Error> { - let old_byte_size = self.byte_size(); - - // Wasm spec: when growing by 0 pages, always return the current size. - if delta_pages == 0 { - return Ok(Some(old_byte_size)); - } - - // largest wasm-page-aligned region of memory it is possible to - // represent in a usize. This will be impossible for the system to - // actually allocate. - let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE); - // calculate byte size of the new allocation. Let it overflow up to - // usize::MAX, then clamp it down to absolute_max. - let new_byte_size = usize::try_from(delta_pages) - .unwrap_or(usize::MAX) - .saturating_mul(WASM_PAGE_SIZE) - .saturating_add(old_byte_size); - let new_byte_size = if new_byte_size > absolute_max { - absolute_max - } else { - new_byte_size - }; - - let maximum = self.maximum_byte_size(); - // Store limiter gets first chance to reject memory_growing. - if !store.memory_growing(old_byte_size, new_byte_size, maximum)? { - return Ok(None); - } - - // Never exceed maximum, even if limiter permitted it. - if let Some(max) = maximum { - if new_byte_size > max { - store.memory_grow_failed(&format_err!("Memory maximum size exceeded")); - return Ok(None); - } - } - - match self.0.grow_to(new_byte_size) { - Ok(_) => Ok(Some(old_byte_size)), - Err(e) => { - store.memory_grow_failed(&e); - Ok(None) - } - } + self.0.grow(delta_pages, store) } /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code. 
@@ -597,7 +716,7 @@ impl Memory {
     #[cfg(feature = "pooling-allocator")]
     pub fn is_static(&mut self) -> bool {
         let as_any = self.0.as_any_mut();
-        as_any.downcast_ref::<ExternalMemory>().is_some()
+        as_any.downcast_ref::<StaticMemory>().is_some()
     }
 
     /// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
@@ -606,10 +725,21 @@ impl Memory {
     #[cfg(feature = "pooling-allocator")]
     pub fn unwrap_static_image(mut self) -> Option<MemoryImageSlot> {
         let as_any = self.0.as_any_mut();
-        if let Some(m) = as_any.downcast_mut::<ExternalMemory>() {
+        if let Some(m) = as_any.downcast_mut::<StaticMemory>() {
             std::mem::take(&mut m.memory_image)
         } else {
             None
         }
     }
+
+    /// If the [Memory] is a [SharedMemory], unwrap it and return a clone to
+    /// that shared memory.
+    pub fn as_shared_memory(&mut self) -> Option<SharedMemory> {
+        let as_any = self.0.as_any_mut();
+        if let Some(m) = as_any.downcast_mut::<SharedMemory>() {
+            Some(m.clone())
+        } else {
+            None
+        }
+    }
 }
diff --git a/crates/runtime/src/vmcontext.rs b/crates/runtime/src/vmcontext.rs
index c670e3d9972d..60f897b8305e 100644
--- a/crates/runtime/src/vmcontext.rs
+++ b/crates/runtime/src/vmcontext.rs
@@ -122,6 +122,8 @@ pub struct VMMemoryImport {
     /// A pointer to the `VMContext` that owns the memory description.
     pub vmctx: *mut VMContext,
+
+    // TODO maybe need to keep track of this here...
+ // pub index: DefinedMemoryIndex, } // Declare that this type is send/sync, it's the responsibility of users of @@ -208,6 +210,13 @@ pub struct VMMemoryDefinition { pub current_length: usize, } +#[derive(Copy, Clone)] +#[repr(C)] +pub union VMMemoryUnion { + shared: *mut VMMemoryDefinition, + owned: VMMemoryDefinition, +} + #[cfg(test)] mod test_vmmemory_definition { use super::VMMemoryDefinition; diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index b3bac9ec9a7f..a41ec5ad0c8c 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -164,6 +164,11 @@ entity_impl!(DefinedTableIndex); pub struct DefinedMemoryIndex(u32); entity_impl!(DefinedMemoryIndex); +/// Index type of a defined memory inside the WebAssembly module. +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug, Serialize, Deserialize)] +pub struct OwnedMemoryIndex(u32); +entity_impl!(OwnedMemoryIndex); + /// Index type of a defined global inside the WebAssembly module. #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug, Serialize, Deserialize)] pub struct DefinedGlobalIndex(u32); diff --git a/crates/wasmtime/src/memory.rs b/crates/wasmtime/src/memory.rs index 596e11537ff1..39e11234af10 100644 --- a/crates/wasmtime/src/memory.rs +++ b/crates/wasmtime/src/memory.rs @@ -1,9 +1,10 @@ use crate::store::{StoreData, StoreOpaque, Stored}; use crate::trampoline::generate_memory_export; -use crate::{AsContext, AsContextMut, MemoryType, StoreContext, StoreContextMut}; +use crate::{AsContext, AsContextMut, Engine, MemoryType, StoreContext, StoreContextMut}; use anyhow::{bail, Result}; use std::convert::TryFrom; use std::slice; +use wasmtime_environ::DefinedMemoryIndex; /// Error for out of bounds [`Memory`] access. 
 #[derive(Debug)]
@@ -227,7 +228,7 @@ impl Memory {
     /// # }
     /// ```
     pub fn new(mut store: impl AsContextMut, ty: MemoryType) -> Result<Memory> {
-        Memory::_new(store.as_context_mut().0, ty)
+        Self::_new(store.as_context_mut().0, ty, None)
     }
 
     #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
@@ -252,12 +253,68 @@ impl Memory {
             store.0.async_support(),
             "cannot use `new_async` without enabling async support on the config"
         );
-        store.on_fiber(|store| Memory::_new(store.0, ty)).await?
+        store
+            .on_fiber(|store| Self::_new(store.0, ty, None))
+            .await?
     }
 
-    fn _new(store: &mut StoreOpaque, ty: MemoryType) -> Result<Memory> {
+    /// Creates a new WebAssembly memory given a [`SharedMemory`].
+    ///
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use wasmtime::*;
+    /// # fn main() -> anyhow::Result<()> {
+    /// let mut config = Config::new();
+    /// config.wasm_threads(true);
+    /// let engine = Engine::new(&config)?;
+    /// let mut store = Store::new(&engine, ());
+    ///
+    /// let shared_memory = SharedMemory::new(&engine, MemoryType::shared(1, 2))?;
+    /// let memory = Memory::from_shared_memory(&mut store, &shared_memory)?;
+    /// let module = Module::new(&engine, "(module (memory (import \"\" \"\") 1 2 shared))")?;
+    /// let instance = Instance::new(&mut store, &module, &[memory.into()])?;
+    /// // ...
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn from_shared_memory(
+        mut store: impl AsContextMut,
+        shared_memory: &SharedMemory,
+    ) -> Result<Memory> {
+        let store = store.as_context_mut();
+        if !Engine::same(store.engine(), shared_memory.engine()) {
+            bail!("cross-`Engine` instantiation is not currently supported");
+        }
+
+        // When we clone this shared memory, we only increment its reference
+        // count.
+        let mem = shared_memory.0.clone();
+
+        Self::_new(store.0, shared_memory.ty(), Some(mem))
+    }
+
+    /// Attempt to convert a [Memory] into a [SharedMemory]; this is only
+    /// possible if the underlying [Memory] was initially created as a
+    /// [SharedMemory] (i.e., with the `shared` annotation).
+ pub fn into_shared_memory(self, mut store: impl AsContextMut) -> Result { + let store = store.as_context_mut().0; + let runtime_memory = unsafe { self.wasmtime_memory(store).as_mut().unwrap() }; + match runtime_memory.as_shared_memory() { + Some(m) => Ok(SharedMemory(m, store.engine().clone())), + None => bail!("unable to convert memory into a shared memory"), + } + } + + /// Helper function for attaching the memory to a "frankenstein" instance + fn _new( + store: &mut StoreOpaque, + ty: MemoryType, + preallocation: Option, + ) -> Result { unsafe { - let export = generate_memory_export(store, &ty)?; + let export = generate_memory_export(store, &ty, preallocation)?; Ok(Memory::from_wasmtime_memory(export, store)) } } @@ -453,7 +510,7 @@ impl Memory { /// This will attempt to add `delta` more pages of memory on to the end of /// this `Memory` instance. If successful this may relocate the memory and /// cause [`Memory::data_ptr`] to return a new value. Additionally any - /// unsafetly constructed slices into this memory may no longer be valid. + /// unsafely constructed slices into this memory may no longer be valid. /// /// On success returns the number of pages this memory previously had /// before the growth succeeded. @@ -533,11 +590,13 @@ impl Memory { ); store.on_fiber(|store| self.grow(store, delta)).await? } + fn wasmtime_memory(&self, store: &mut StoreOpaque) -> *mut wasmtime_runtime::Memory { unsafe { let export = &store[self.0]; let mut handle = wasmtime_runtime::InstanceHandle::from_vmctx(export.vmctx); - let idx = handle.memory_index(&*export.definition); + // let idx = handle.memory_index(&*export.definition); + let idx = DefinedMemoryIndex::from_u32(0); handle.get_defined_memory(idx) } } @@ -558,6 +617,7 @@ impl Memory { wasmtime_runtime::VMMemoryImport { from: export.definition, vmctx: export.vmctx, + // index: DefinedMemoryIndex::from_u32(0), // TODO incorrect... 
must be stored on export } } @@ -654,6 +714,116 @@ pub unsafe trait MemoryCreator: Send + Sync { ) -> Result, String>; } +/// A constructor for externally-created shared memory. +/// +/// The [threads proposal] adds the concept of "shared memory" to WebAssembly. +/// This is much the same as a Wasm linear memory (i.e., [`Memory`]), but can be +/// used concurrently by multiple agents. Because these agents may execute in +/// different threads, [`SharedMemory`] must be thread-safe. +/// +/// When the threads proposal is enabled, there are multiple ways to construct +/// shared memory: +/// 1. for imported shared memory, e.g., `(import "env" "memory" (memory 1 1 +/// shared))`, the user must supply a [`SharedMemory`] with the +/// externally-created memory and convert it to a [`Memory`] prior to +/// instantiation (see [Memory::from_shared_memory()]). +/// 2. for private or exported shared memory, e.g., `(export "env" "memory" +/// (memory 1 1 shared))`, Wasmtime will create the memory internally during +/// instantiation. +/// +/// [threads proposal]: +/// https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md +#[derive(Clone)] +pub struct SharedMemory(wasmtime_runtime::SharedMemory, Engine); +impl SharedMemory { + /// Construct a [`SharedMemory`] by providing both the `minimum` and + /// `maximum` number of 64K-sized pages. This call allocates the necessary + /// pages on the system. + pub fn new(engine: &Engine, ty: MemoryType) -> Result { + if !ty.is_shared() { + bail!("shared memory must have the `shared` flag enabled on its memory type") + } + debug_assert!(ty.maximum().is_some()); + + let tunables = &engine.config().tunables; + // TODO: check the tunables (only static allocation allowed) + let memory = wasmtime_runtime::SharedMemory::new(ty.wasmtime_memory().clone(), tunables)?; + Ok(Self(memory, engine.clone())) + } + + /// Return a reference to the [`Engine`] used to configure the shared + /// memory. 
+ pub fn engine(&self) -> &Engine { + &self.1 + } + + /// Return the type of the shared memory. + pub fn ty(&self) -> MemoryType { + MemoryType::from_wasmtime_memory(&self.0.ty()) + } + + /// Returns the size, in WebAssembly pages, of this wasm memory. + pub fn size(&self) -> u64 { + (self.data_size() / wasmtime_environ::WASM_PAGE_SIZE as usize) as u64 + } + + /// Returns the byte length of this memory. + /// + /// The returned value will be a multiple of the wasm page size, 64k. + /// + /// For more information and examples see the documentation on the + /// [`Memory`] type. + pub fn data_size(&self) -> usize { + use wasmtime_runtime::RuntimeLinearMemory; + self.0.byte_size() + } + + /// Return read access to the available portion of the shared memory. Note + /// that the available pages may be between `[minimum, maximum]`. + pub fn data(&self) -> &[u8] { + unsafe { + let definition = *self.0.vmmemory_ptr(); + slice::from_raw_parts(definition.base, definition.current_length) + } + } + + /// Return write access to the available portion of the shared memory. Note + /// that the available pages may be between `[minimum, maximum]`. + pub fn data_mut(&mut self) -> &mut [u8] { + unsafe { + let definition = *self.0.vmmemory_ptr_mut(); + slice::from_raw_parts_mut(definition.base, definition.current_length) + } + } + + /// Grows this WebAssembly memory by `delta` pages. + /// + /// This will attempt to add `delta` more pages of memory on to the end of + /// this `Memory` instance. If successful this may relocate the memory and + /// cause [`Memory::data_ptr`] to return a new value. Additionally any + /// unsafely constructed slices into this memory may no longer be valid. + /// + /// On success returns the number of pages this memory previously had + /// before the growth succeeded. + /// + /// # Errors + /// + /// Returns an error if memory could not be grown, for example if it exceeds + /// the maximum limits of this memory. 
A + /// [`ResourceLimiter`](crate::ResourceLimiter) is another example of + /// preventing a memory to grow. + pub fn grow(&self, delta: u64) -> Result { + todo!() + // self.0.grow(delta, ) + } +} + +impl Into for SharedMemory { + fn into(self) -> wasmtime_runtime::SharedMemory { + self.0 + } +} + #[cfg(test)] mod tests { use crate::*; diff --git a/crates/wasmtime/src/trampoline.rs b/crates/wasmtime/src/trampoline.rs index 357302633ed1..46c9fa202ec6 100644 --- a/crates/wasmtime/src/trampoline.rs +++ b/crates/wasmtime/src/trampoline.rs @@ -19,8 +19,8 @@ use std::any::Any; use std::sync::Arc; use wasmtime_environ::{GlobalIndex, MemoryIndex, Module, SignatureIndex, TableIndex}; use wasmtime_runtime::{ - Imports, InstanceAllocationRequest, InstanceAllocator, OnDemandInstanceAllocator, StorePtr, - VMFunctionImport, VMSharedSignatureIndex, + Imports, InstanceAllocationRequest, InstanceAllocator, OnDemandInstanceAllocator, SharedMemory, + StorePtr, VMFunctionImport, VMSharedSignatureIndex, }; fn create_handle( @@ -68,8 +68,9 @@ pub fn generate_global_export( pub fn generate_memory_export( store: &mut StoreOpaque, m: &MemoryType, + preallocation: Option, ) -> Result { - let instance = create_memory(store, m)?; + let instance = create_memory(store, m, preallocation)?; Ok(store .instance_mut(instance) .get_exported_memory(MemoryIndex::from_u32(0))) diff --git a/crates/wasmtime/src/trampoline/memory.rs b/crates/wasmtime/src/trampoline/memory.rs index 1a5858182f52..8de717dae3ea 100644 --- a/crates/wasmtime/src/trampoline/memory.rs +++ b/crates/wasmtime/src/trampoline/memory.rs @@ -1,28 +1,83 @@ use crate::memory::{LinearMemory, MemoryCreator}; +use crate::module::BareModuleInfo; use crate::store::{InstanceId, StoreOpaque}; -use crate::trampoline::create_handle; use crate::MemoryType; use anyhow::{anyhow, Result}; use std::convert::TryFrom; use std::sync::Arc; -use wasmtime_environ::{EntityIndex, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE}; +use 
wasmtime_environ::{EntityIndex, MemoryIndex, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE}; use wasmtime_runtime::{ - MemoryImage, RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition, + allocate_single_memory_instance, DefaultMemoryCreator, Imports, InstanceAllocationRequest, + InstantiationError, Memory, MemoryImage, RuntimeLinearMemory, RuntimeMemoryCreator, + SharedMemory, StorePtr, VMMemoryDefinition, }; -pub fn create_memory(store: &mut StoreOpaque, memory: &MemoryType) -> Result { +/// Create a "frankenstein" instance with a single memory. +/// +/// This separate instance is necessary because Wasm objects in Wasmtime must be +/// attached to instances (versus the store, e.g.) and some objects exist +/// outside: a host-provided memory import, shared memory. +pub fn create_memory( + store: &mut StoreOpaque, + memory_ty: &MemoryType, + preallocation: Option, +) -> Result { let mut module = Module::new(); - let memory_plan = wasmtime_environ::MemoryPlan::for_memory( - memory.wasmtime_memory().clone(), + // Create a memory plan for the memory, though it will never be used for + // constructing a memory with an allocator: instead the memories are either + // preallocated (i.e., shared memory) or allocated manually below. + let plan = wasmtime_environ::MemoryPlan::for_memory( + memory_ty.wasmtime_memory().clone(), &store.engine().config().tunables, ); - let memory_id = module.memory_plans.push(memory_plan); + module.memory_plans.push(plan.clone()); + + let memory = match &preallocation { + // If we are passing in a pre-allocated shared memory, we can clone its + // `Arc`. We know that a preallocated memory *must* be shared--it could + // be used by several instances. + Some(shared_memory) => shared_memory.clone().as_memory(), + // If we do not have a pre-allocated memory, then we create it here and + // associate it with the "frankenstein" instance, which now owns it. 
+ None => { + let creator = &DefaultMemoryCreator; + let store = unsafe { + store + .traitobj() + .as_mut() + .expect("the store pointer cannot be null here") + }; + Memory::new_dynamic(&plan, creator, store, None) + .map_err(|err| InstantiationError::Resource(err.into()))? + } + }; + + // Since we have only associated a single memory with the "frankenstein" + // instance, it must be exported at index 0. module .exports - .insert(String::new(), EntityIndex::Memory(memory_id)); + .insert(String::new(), EntityIndex::Memory(MemoryIndex::from_u32(0))); - create_handle(module, store, Box::new(()), &[], None) + // We create an instance in the on-demand allocator when creating handles + // associated with external objects. The configured instance allocator + // should only be used when creating module instances as we don't want host + // objects to count towards instance limits. + let runtime_info = &BareModuleInfo::maybe_imported_func(Arc::new(module), None).into_traitobj(); + let host_state = Box::new(()); + let imports = Imports::default(); + let request = InstanceAllocationRequest { + imports, + host_state, + store: StorePtr::new(store.traitobj()), + runtime_info, + }; + + unsafe { + let handle = allocate_single_memory_instance(request, memory)?; + let instance_id = store.add_instance(handle.clone(), true); + Ok(instance_id) + } } struct LinearMemoryProxy { @@ -53,7 +108,6 @@ impl RuntimeLinearMemory for LinearMemoryProxy { true } - #[cfg(feature = "pooling-allocator")] fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } diff --git a/crates/wasmtime/src/types.rs b/crates/wasmtime/src/types.rs index b7dc9a0a7927..ad980b7931e0 100644 --- a/crates/wasmtime/src/types.rs +++ b/crates/wasmtime/src/types.rs @@ -373,6 +373,25 @@ impl MemoryType { } } + /// Creates a new descriptor for shared WebAssembly memory given the + /// specified limits of the memory. 
+ /// + /// The `minimum` and `maximum` values here are specified in units of + /// WebAssembly pages, which are 64k. + /// + /// Note that shared memories are part of the threads proposal for + /// WebAssembly which is not standardized yet. + pub fn shared(minimum: u64, maximum: u64) -> MemoryType { + MemoryType { + ty: Memory { + memory64: false, + shared: true, + minimum, + maximum: Some(maximum), + }, + } + } + /// Returns whether this is a 64-bit memory or not. /// /// Note that 64-bit memories are part of the memory64 proposal for @@ -381,6 +400,14 @@ impl MemoryType { self.ty.memory64 } + /// Returns whether this is a shared memory or not. + /// + /// Note that shared memories are part of the threads proposal for + /// WebAssembly which is not standardized yet. + pub fn is_shared(&self) -> bool { + self.ty.shared + } + /// Returns minimum number of WebAssembly pages this memory must have. /// /// Note that the return value, while a `u64`, will always fit into a `u32` diff --git a/tests/all/main.rs b/tests/all/main.rs index 2db9e611d8d7..5bf8992fc3e8 100644 --- a/tests/all/main.rs +++ b/tests/all/main.rs @@ -30,6 +30,7 @@ mod relocs; mod stack_overflow; mod store; mod table; +mod threads; mod traps; mod wast; diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs index 0509df782583..b68dd1602ad4 100644 --- a/tests/all/pooling_allocator.rs +++ b/tests/all/pooling_allocator.rs @@ -630,11 +630,10 @@ fn instance_too_large() -> Result<()> { let engine = Engine::new(&config)?; let expected = "\ -instance allocation for this module requires 304 bytes which exceeds the \ +instance allocation for this module requires 320 bytes which exceeds the \ configured maximum of 16 bytes; breakdown of allocation requirement: - * 78.95% - 240 bytes - instance state management - * 5.26% - 16 bytes - jit store state + * 80.00% - 256 bytes - instance state management "; match Module::new(&engine, "(module)") { Ok(_) => panic!("should have failed to compile"), 
@@ -648,11 +647,11 @@ configured maximum of 16 bytes; breakdown of allocation requirement: lots_of_globals.push_str(")"); let expected = "\ -instance allocation for this module requires 1904 bytes which exceeds the \ +instance allocation for this module requires 1920 bytes which exceeds the \ configured maximum of 16 bytes; breakdown of allocation requirement: - * 12.61% - 240 bytes - instance state management - * 84.03% - 1600 bytes - defined globals + * 13.33% - 256 bytes - instance state management + * 83.33% - 1600 bytes - defined globals "; match Module::new(&engine, &lots_of_globals) { Ok(_) => panic!("should have failed to compile"), diff --git a/tests/all/threads.rs b/tests/all/threads.rs new file mode 100644 index 000000000000..3ec5e6240f51 --- /dev/null +++ b/tests/all/threads.rs @@ -0,0 +1,168 @@ +use anyhow::Result; +use std::sync::{Arc, RwLock}; +use wasmtime::*; + +#[test] +fn test_instantiate_shared_memory() -> Result<()> { + let wat = r#"(module (memory 1 1 shared))"#; + let mut config = Config::new(); + config.wasm_threads(true); + let engine = Engine::new(&config)?; + let module = Module::new(&engine, wat)?; + let mut store = Store::new(&engine, ()); + let _instance = Instance::new(&mut store, &module, &[])?; + Ok(()) +} + +#[test] +fn test_import_shared_memory() -> Result<()> { + let wat = r#"(module (import "env" "memory" (memory 1 5 shared)))"#; + let mut config = Config::new(); + config.wasm_threads(true); + let engine = Engine::new(&config)?; + let module = Module::new(&engine, wat)?; + let mut store = Store::new(&engine, ()); + let shared_memory = SharedMemory::new(&engine, MemoryType::shared(1, 5))?; + let memory = Memory::from_shared_memory(&mut store, &shared_memory)?; + let _instance = Instance::new(&mut store, &module, &[memory.into()])?; + Ok(()) +} + +#[test] +fn test_export_shared_memory() -> Result<()> { + let wat = r#"(module (memory (export "memory") 1 5 shared))"#; + let mut config = Config::new(); + config.wasm_threads(true); + 
let engine = Engine::new(&config)?; + let module = Module::new(&engine, wat)?; + let mut store = Store::new(&engine, ()); + let instance = Instance::new(&mut store, &module, &[])?; + let shared_memory = instance + .get_memory(&mut store, "memory") + .unwrap() + .into_shared_memory(&mut store)?; + shared_memory.data(); + Ok(()) +} + +#[test] +fn test_construct_memory_with_shared_type() -> Result<()> { + // let memory = Memory::new(&mut store, MemoryType::shared(1, 5))?; + Ok(()) +} + +#[test] +fn test_sharing_of_shared_memory() -> Result<()> { + let wat = r#"(module + (import "env" "memory" (memory 1 5 shared)) + (func (export "first_word") (result i32) (i32.load (i32.const 0))) + )"#; + let mut config = Config::new(); + config.wasm_threads(true); + let engine = Engine::new(&config)?; + let module = Module::new(&engine, wat)?; + let mut store = Store::new(&engine, ()); + let mut shared_memory = SharedMemory::new(&engine, MemoryType::shared(1, 5))?; + let memory = Memory::from_shared_memory(&mut store, &shared_memory)?; + let instance1 = Instance::new(&mut store, &module, &[memory.into()])?; + let instance2 = Instance::new(&mut store, &module, &[memory.into()])?; + + // Modify the memory in one place. + shared_memory.data_mut()[0] = 42; + + // Verify that the memory is the same in all shared locations. + let shared_memory_first_word = i32::from_le_bytes(shared_memory.data()[0..4].try_into()?); + let memory_first_word = i32::from_le_bytes(memory.data(&store)[0..4].try_into()?); + let instance1_first_word = instance1 + .get_typed_func::<(), i32, _>(&mut store, "first_word")? + .call(&mut store, ())?; + let instance2_first_word = instance2 + .get_typed_func::<(), i32, _>(&mut store, "first_word")? 
+ .call(&mut store, ())?; + assert_eq!(shared_memory_first_word, 42); + assert_eq!(memory_first_word, 42); + assert_eq!(instance1_first_word, 42); + assert_eq!(instance2_first_word, 42); + + Ok(()) +} + +#[test] +fn test_probe_shared_memory_size() -> Result<()> { + let wat = r#"(module + (memory (export "memory") 1 1 shared) + (func (export "size") (result i32) (memory.size)) + )"#; + let mut config = Config::new(); + config.wasm_threads(true); + let engine = Engine::new(&config)?; + let module = Module::new(&engine, wat)?; + let mut store = Store::new(&engine, ()); + let instance = Instance::new(&mut store, &module, &[])?; + let size_fn = instance.get_typed_func::<(), i32, _>(&mut store, "size")?; + + assert_eq!(size_fn.call(&mut store, ())?, 1); + assert_eq!( + instance + .get_memory(&mut store, "memory") + .unwrap() + .size(&store), + 1 + ); + + Ok(()) +} + +#[test] +fn test_grow_memory_in_multiple_threads() -> Result<()> { + let wat = r#"(module + (import "env" "memory" (memory 1 10 shared)) + (func (export "grow") (param $delta i32) (result i32) (memory.grow (local.get $delta))) + )"#; + + let mut config = Config::new(); + config.wasm_threads(true); + let engine = Arc::new(Engine::new(&config)?); + let module = Arc::new(Module::new(&engine, wat)?); + let shared_memory = SharedMemory::new(&engine, MemoryType::shared(1, 10))?; + let mut threads = vec![]; + let sizes = Arc::new(RwLock::new(vec![])); + + // Spawn several threads using a single shared memory and grow the memory + // concurrently on all threads. 
+    for _ in 0..4 {
+        let engine = engine.clone();
+        let module = module.clone();
+        let sizes = sizes.clone();
+        let shared_memory = shared_memory.clone();
+        let thread = std::thread::spawn(move || {
+            let mut store = Store::new(&engine, ());
+            let memory = Memory::from_shared_memory(&mut store, &shared_memory).unwrap();
+            let instance = Instance::new(&mut store, &module, &[memory.into()]).unwrap();
+            let grow = instance
+                .get_typed_func::<i32, i32, _>(&mut store, "grow")
+                .unwrap();
+            for _ in 0..4 {
+                let old_size = grow.call(&mut store, 1).unwrap();
+                sizes.write().unwrap().push(old_size as u32);
+            }
+        });
+        threads.push(thread);
+    }
+
+    // Wait for all threads to finish.
+    for t in threads {
+        t.join().unwrap()
+    }
+
+    // Ensure the returned "old memory sizes" were pushed in increasing order,
+    // indicating that the lock worked.
+    println!("Returned memory sizes: {:?}", sizes);
+    assert!(is_sorted(sizes.read().unwrap().as_slice()));
+
+    Ok(())
+}
+
+fn is_sorted(data: &[u32]) -> bool {
+    data.windows(2).all(|d| d[0] <= d[1])
+}