diff --git a/kernel-hal-bare/src/arch/x86_64/mod.rs b/kernel-hal-bare/src/arch/x86_64/mod.rs
index 1046af199..08009e59a 100644
--- a/kernel-hal-bare/src/arch/x86_64/mod.rs
+++ b/kernel-hal-bare/src/arch/x86_64/mod.rs
@@ -354,3 +354,28 @@ pub fn init() {
 pub fn fetch_fault_vaddr() -> VirtAddr {
     Cr2::read().as_u64() as _
 }
+
+lazy_static! {
+    static ref RANGECHECKER: Mutex<Vec<(PhysAddr, PhysAddr)>> = Mutex::new(Vec::default());
+}
+
+#[export_name = "dma_range_check"]
+pub fn dma_check(paddr: PhysAddr, pages: usize) -> bool {
+    let mut checker = RANGECHECKER.lock();
+    let paddr_end = paddr + pages * PAGE_SIZE;
+    let conflict = checker.iter().any(|range| {
+        range.0 <= paddr && range.1 > paddr || range.0 <= paddr_end && paddr_end < range.1
+    });
+    if !conflict {
+        checker.push((paddr, paddr_end));
+    }
+    !conflict
+}
+
+#[export_name = "dma_range_recycle"]
+pub fn dma_recycle(paddr: PhysAddr, pages: usize) {
+    let paddr_end = paddr + pages * PAGE_SIZE;
+    RANGECHECKER
+        .lock()
+        .retain(|range| !(range.0 == paddr && range.1 == paddr_end));
+}
diff --git a/kernel-hal-bare/src/lib.rs b/kernel-hal-bare/src/lib.rs
index 44d6b691d..44e66ff13 100644
--- a/kernel-hal-bare/src/lib.rs
+++ b/kernel-hal-bare/src/lib.rs
@@ -169,10 +169,11 @@ pub fn frame_copy(src: PhysAddr, target: PhysAddr) {
 
 /// Zero `target` frame.
 #[export_name = "hal_frame_zero"]
-pub fn frame_zero(target: PhysAddr) {
+pub fn frame_zero_in_range(target: PhysAddr, start: usize, end: usize) {
+    assert!(start < PAGE_SIZE && end <= PAGE_SIZE);
     trace!("frame_zero: {:#x}", target);
     unsafe {
-        core::ptr::write_bytes(phys_to_virt(target) as *mut u8, 0, PAGE_SIZE);
+        core::ptr::write_bytes(phys_to_virt(target + start) as *mut u8, 0, end - start);
     }
 }
diff --git a/kernel-hal-unix/src/lib.rs b/kernel-hal-unix/src/lib.rs
index 186b27a9c..aaf5cbf8f 100644
--- a/kernel-hal-unix/src/lib.rs
+++ b/kernel-hal-unix/src/lib.rs
@@ -410,6 +410,14 @@ pub fn init() {
     });
 }
 
+#[export_name = "dma_range_check"]
+pub fn dma_check(_paddr: PhysAddr, _pages: usize) -> bool {
+    true
+}
+
+#[export_name = "dma_range_recycle"]
+pub fn dma_recycle(_paddr: PhysAddr, _pages: usize) {}
+
 #[cfg(test)]
 mod tests {
     use super::*;
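Note on `dma_check` above: it treats claimed DMA regions as half-open intervals `[paddr, paddr_end)` and rejects a new claim that overlaps an existing one. The two-clause test catches a new range that straddles either endpoint of a claimed range, but a new range that strictly contains a claimed one appears to satisfy neither clause. A standalone sketch of the interval test in its symmetric form, which covers that case too (this is illustration, not zCore code):

```rust
// `ranges` holds half-open intervals [start, end) already claimed for DMA.

/// True iff [start, end) intersects any claimed range.
/// Two half-open intervals overlap exactly when each starts before the
/// other ends, so a single symmetric test covers every case.
fn overlaps(ranges: &[(usize, usize)], start: usize, end: usize) -> bool {
    ranges.iter().any(|&(s, e)| start < e && s < end)
}

fn main() {
    let claimed = [(0x2000, 0x3000)];
    // straddles the front of the claimed range: both forms catch this
    assert!(overlaps(&claimed, 0x1000, 0x2001));
    // strictly contains the claimed range: the patch's two-clause test
    // (`r.0 <= p && r.1 > p || r.0 <= p_end && p_end < r.1`) misses this
    assert!(overlaps(&claimed, 0x1000, 0x4000));
    // disjoint
    assert!(!overlaps(&claimed, 0x3000, 0x4000));
}
```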
diff --git a/kernel-hal/src/dummy.rs b/kernel-hal/src/dummy.rs
index 6e46e07de..bafc71959 100644
--- a/kernel-hal/src/dummy.rs
+++ b/kernel-hal/src/dummy.rs
@@ -188,7 +188,7 @@ pub fn frame_copy(_src: PhysAddr, _target: PhysAddr) {
 
 /// Zero `target` frame.
 #[linkage = "weak"]
 #[export_name = "hal_frame_zero"]
-pub fn frame_zero(_target: PhysAddr) {
+pub fn frame_zero_in_range(_target: PhysAddr, _start: usize, _end: usize) {
     unimplemented!()
 }
 
@@ -261,3 +261,15 @@ pub fn vdso_constants() -> VdsoConstants {
 pub fn fetch_fault_vaddr() -> VirtAddr {
     unimplemented!()
 }
+
+#[linkage = "weak"]
+#[export_name = "dma_range_check"]
+pub fn dma_check(_paddr: PhysAddr, _pages: usize) -> bool {
+    unimplemented!()
+}
+
+#[linkage = "weak"]
+#[export_name = "dma_range_recycle"]
+pub fn dma_recycle(_paddr: PhysAddr, _pages: usize) {
+    unimplemented!()
+}
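On the linkage pattern above: `kernel-hal` is the interface crate, compiling weak stubs under fixed symbol names; each backend (`kernel-hal-bare` on bare metal, `kernel-hal-unix` in the libos) exports a strong definition under the same `export_name`, so whichever backend is linked wins symbol resolution. A minimal sketch of the interface side, assuming the nightly `linkage` feature that `kernel-hal` already enables:

```rust
#![feature(linkage)] // nightly-only; goes at the crate root of the interface crate

/// Weak fallback: used only when no backend provides the symbol.
/// A strong `#[export_name = "dma_range_check"]` definition in a linked
/// backend crate (as in the hunks above) replaces this body at link time.
#[linkage = "weak"]
#[export_name = "dma_range_check"]
pub fn dma_check(_paddr: usize, _pages: usize) -> bool {
    unimplemented!("no HAL backend linked")
}
```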
diff --git a/zircon-loader/src/kcounter.rs b/zircon-loader/src/kcounter.rs
index 4bec13159..4dde4407f 100644
--- a/zircon-loader/src/kcounter.rs
+++ b/zircon-loader/src/kcounter.rs
@@ -41,7 +41,7 @@ pub fn create_kcounter_vmo() -> (Arc<VmObject>, Arc<VmObject>) {
             kcounters_arena_end as usize / PAGE_SIZE,
             "all kcounters must in the same page"
         );
-        unsafe { VmObject::new_physical(paddr, 1) }
+        VmObject::new_physical(paddr, 1)
     };
     kcounters_vmo.set_name("counters/arena");
     (counter_name_vmo, kcounters_vmo)
diff --git a/zircon-loader/src/lib.rs b/zircon-loader/src/lib.rs
index 08bb8e22b..406aa228f 100644
--- a/zircon-loader/src/lib.rs
+++ b/zircon-loader/src/lib.rs
@@ -143,9 +143,9 @@ pub fn run_userboot(images: &Images<Arc<VmObject>>, cmdline: &str) -> Arc<Process> {
diff --git a/zircon-object/src/vm/vmo/mod.rs b/zircon-object/src/vm/vmo/mod.rs
--- a/zircon-object/src/vm/vmo/mod.rs
+++ b/zircon-object/src/vm/vmo/mod.rs
@@ ... @@ pub trait VMObjectTrait: Sync + Send {
     fn decommit(&self, offset: usize, len: usize) -> ZxResult;
     /// Create a child VMO.
-    fn create_child(&self, offset: usize, len: usize, user_id: KoID) -> Arc<dyn VMObjectTrait>;
+    fn create_child(
+        &self,
+        is_slice: bool,
+        offset: usize,
+        len: usize,
+        user_id: KoID,
+    ) -> Arc<dyn VMObjectTrait>;
 
     fn append_mapping(&self, mapping: Weak<VmMapping>);
 
@@ -71,6 +77,8 @@ pub trait VMObjectTrait: Sync + Send {
     fn share_count(&self) -> usize;
 
     fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize;
+
+    fn zero(&self, offset: usize, len: usize) -> ZxResult;
 }
 
 pub struct VmObject {
@@ -79,6 +87,7 @@
     children: Mutex<Vec<Weak<VmObject>>>,
     _counter: CountHelper,
     resizable: bool,
+    is_slice: bool,
     inner: Arc<dyn VMObjectTrait>,
 }
 
@@ -97,6 +106,7 @@ impl VmObject {
             parent: Default::default(),
             children: Mutex::new(Vec::new()),
             resizable,
+            is_slice: false,
             _counter: CountHelper::new(),
             inner: VMObjectPaged::new(base.id, pages),
             base,
         })
     }
 
@@ -104,35 +114,52 @@ impl VmObject {
     /// Create a new VMO representing a piece of contiguous physical memory.
-    ///
-    /// # Safety
-    ///
     /// You must ensure nobody has the ownership of this piece of memory yet.
-    #[allow(unsafe_code)]
-    pub unsafe fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
+    pub fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
         Arc::new(VmObject {
             base: KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN),
             parent: Default::default(),
             children: Mutex::new(Vec::new()),
             resizable: true,
+            is_slice: false,
             _counter: CountHelper::new(),
             inner: VMObjectPhysical::new(paddr, pages),
         })
     }
 
     /// Create a child VMO.
-    pub fn create_child(self: &Arc<Self>, resizable: bool, offset: usize, len: usize) -> Arc<Self> {
+    pub fn create_child(
+        self: &Arc<Self>,
+        is_slice: bool,
+        resizable: bool,
+        offset: usize,
+        len: usize,
+    ) -> Arc<Self> {
+        assert!(!(is_slice && resizable));
+        if self.is_slice {
+            assert!(is_slice, "create a not-slice child for a slice parent!!!");
+        }
         let base = KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN);
         base.set_name(&self.base.name());
         let child = Arc::new(VmObject {
-            parent: Arc::downgrade(self),
+            parent: if is_slice && self.is_slice {
+                self.parent.clone()
+            } else {
+                Arc::downgrade(self)
+            },
             children: Mutex::new(Vec::new()),
             resizable,
+            is_slice,
             _counter: CountHelper::new(),
-            inner: self.inner.create_child(offset, len, base.id),
+            inner: self.inner.create_child(is_slice, offset, len, base.id),
             base,
         });
-        self.children.lock().push(Arc::downgrade(&child));
+        if self.is_slice {
+            let arc_parent = self.parent.upgrade().unwrap();
+            arc_parent.children.lock().push(Arc::downgrade(&child));
+        } else {
+            self.children.lock().push(Arc::downgrade(&child));
+        }
         self.base.signal_clear(Signal::VMO_ZERO_CHILDREN);
         child
     }
@@ -182,6 +209,10 @@ impl VmObject {
     pub fn is_resizable(&self) -> bool {
         self.resizable
     }
+
+    pub fn is_slice(&self) -> bool {
+        self.is_slice
+    }
 }
 
 impl Deref for VmObject {
@@ -197,6 +228,14 @@ impl Drop for VmObject {
         if let Some(parent) = self.parent.upgrade() {
             let mut children = parent.children.lock();
             children.retain(|c| c.strong_count() != 0);
+            children.iter().for_each(|child| {
+                let arc_child = child.upgrade().unwrap();
+                let mut locked_children = arc_child.children.lock();
+                locked_children.retain(|c| c.strong_count() != 0);
+                if locked_children.is_empty() {
+                    arc_child.base.signal_set(Signal::VMO_ZERO_CHILDREN);
+                }
+            });
             if children.is_empty() {
                 parent.base.signal_set(Signal::VMO_ZERO_CHILDREN);
             }
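The `create_child` rewrite above keeps slice chains flat: a slice of a slice re-parents onto the original non-slice VMO (`self.parent.clone()`), and `create_slice` in paged.rs (further down) folds the slice's own `parent_offset` into the child's window, so page lookups are always a single hop. A standalone model of that offset arithmetic (types and names hypothetical, not zCore code):

```rust
// Minimal model of slice flattening: a slice records its root plus a
// byte window [parent_offset, parent_offset + size) into that root.
#[derive(Clone)]
struct Slice {
    root: usize,          // stand-in for the Arc pointing at the root VMO
    parent_offset: usize, // window start, in bytes, within the root
    size: usize,
}

impl Slice {
    /// Slicing a slice adds offsets instead of nesting, mirroring
    /// `create_slice`'s `offset + self.parent_offset`; the length is
    /// clamped to the parent window like `(offset + len).min(self.size)`.
    fn slice(&self, offset: usize, len: usize) -> Slice {
        Slice {
            root: self.root,
            parent_offset: self.parent_offset + offset,
            size: len.min(self.size.saturating_sub(offset)),
        }
    }
}

fn main() {
    const PAGE_SIZE: usize = 0x1000;
    let a = Slice { root: 0, parent_offset: 2 * PAGE_SIZE, size: 4 * PAGE_SIZE };
    let b = a.slice(PAGE_SIZE, 2 * PAGE_SIZE);
    // page 0 of `b` is page 3 of the root, resolved in one hop
    assert_eq!(b.parent_offset / PAGE_SIZE, 3);
}
```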
diff --git a/zircon-object/src/vm/vmo/paged.rs b/zircon-object/src/vm/vmo/paged.rs
index 8f9b9ce47..68634cf90 100644
--- a/zircon-object/src/vm/vmo/paged.rs
+++ b/zircon-object/src/vm/vmo/paged.rs
@@ -30,6 +30,7 @@ enum VMOType {
         /// The right child.
         right: WeakRef,
     },
+    Slice,
 }
 
 impl VMOType {
@@ -51,6 +52,10 @@ impl VMOType {
     fn is_hidden(&self) -> bool {
         matches!(self, VMOType::Hidden { .. })
     }
+
+    fn is_slice(&self) -> bool {
+        matches!(self, VMOType::Slice)
+    }
 }
 
 /// The main VM object type, holding a list of pages.
@@ -81,6 +86,8 @@ struct VMObjectPagedInner {
     cache_policy: CachePolicy,
     /// A weak reference to myself.
     self_ref: WeakRef,
+    /// Children created by create_slice
+    children: Vec<WeakRef>,
 }
 
 /// Page state in VMO.
@@ -151,6 +158,7 @@ impl VMObjectPaged {
             mappings: Vec::new(),
             cache_policy: CachePolicy::Cached,
             self_ref: Default::default(),
+            children: Vec::new(),
         })
     }
 
@@ -219,6 +227,32 @@ impl VMObjectTrait for VMObjectPaged {
         })
     }
 
+    fn zero(&self, offset: usize, len: usize) -> ZxResult {
+        if offset + len > self.inner.lock().size {
+            return Err(ZxError::OUT_OF_RANGE);
+        }
+        let iter = BlockIter {
+            begin: offset,
+            end: offset + len,
+            block_size_log2: 12,
+        };
+        let mut unwanted = VecDeque::new();
+        for block in iter {
+            //let paddr = self.commit_page(block.block, MMUFlags::READ)?;
+            if block.len() == PAGE_SIZE {
+                let _ = self.commit_page(block.block, MMUFlags::WRITE)?;
+                unwanted.push_back(block.block);
+                self.inner.lock().frames.remove(&block.block);
+            } else if self.committed_pages_in_range(block.block, block.block + 1) != 0 {
+                // check whether this page is initialized, otherwise nothing should be done
+                let paddr = self.commit_page(block.block, MMUFlags::WRITE)?;
+                kernel_hal::frame_zero_in_range(paddr, block.begin, block.end);
+            }
+        }
+        self.inner.lock().release_unwanted_pages(unwanted);
+        Ok(())
+    }
+
     fn len(&self) -> usize {
         self.inner.lock().size
     }
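The `zero` implementation above splits `[offset, offset + len)` into page blocks: a block covering a whole page is dropped from `frames` entirely (subsequent reads fall back to the shared zero page), while a partial block is committed and cleared in place with `frame_zero_in_range`. A standalone re-model of the split, assuming 4 KiB pages and a non-empty range (`BlockIter` is a real zCore type; this sketch is not it):

```rust
const PAGE_SIZE: usize = 0x1000;

/// Yield (page_index, begin_in_page, end_in_page) for [offset, offset + len).
/// Assumes len > 0.
fn page_blocks(offset: usize, len: usize) -> impl Iterator<Item = (usize, usize, usize)> {
    let end = offset + len;
    (offset / PAGE_SIZE..=(end - 1) / PAGE_SIZE).map(move |page| {
        let base = page * PAGE_SIZE;
        let begin = if base < offset { offset - base } else { 0 };
        let stop = if base + PAGE_SIZE > end { end - base } else { PAGE_SIZE };
        (page, begin, stop)
    })
}

fn main() {
    // zeroing [0x800, 0x2800) touches three pages: page 0 partially
    // (0x800..0x1000), page 1 fully (decommit), page 2 partially (0..0x800)
    let blocks: Vec<_> = page_blocks(0x800, 0x2000).collect();
    assert_eq!(blocks, vec![(0, 0x800, 0x1000), (1, 0, 0x1000), (2, 0, 0x800)]);
}
```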
@@ -247,8 +281,12 @@ impl VMObjectTrait for VMObjectPaged {
     fn decommit(&self, offset: usize, len: usize) -> ZxResult {
         let mut inner = self.inner.lock();
-        // non-slice child VMOs do not support decommit.
-        if inner.parent.is_some() {
+        if inner.type_.is_slice() {
+            let parent_offset = offset + inner.parent_offset;
+            return inner.parent.as_ref().unwrap().decommit(parent_offset, len);
+        }
+        let check = inner.parent.is_none();
+        if !check {
             return Err(ZxError::NOT_SUPPORTED);
         }
         let start_page = offset / PAGE_SIZE;
@@ -259,10 +297,20 @@
         Ok(())
     }
 
-    fn create_child(&self, offset: usize, len: usize, user_id: KoID) -> Arc<dyn VMObjectTrait> {
+    fn create_child(
+        &self,
+        is_slice: bool,
+        offset: usize,
+        len: usize,
+        user_id: KoID,
+    ) -> Arc<dyn VMObjectTrait> {
         assert!(page_aligned(offset));
         assert!(page_aligned(len));
-        self.inner.lock().create_child(offset, len, user_id)
+        if !is_slice {
+            self.inner.lock().create_child(offset, len, user_id)
+        } else {
+            self.inner.lock().create_slice(offset, len, user_id)
+        }
     }
 
     fn append_mapping(&self, mapping: Weak<VmMapping>) {
@@ -346,6 +394,15 @@ impl VMObjectPaged {
         child: &WeakRef,
     ) -> ZxResult<PhysAddr> {
         let mut inner = self.inner.lock();
+        if inner.type_.is_slice() {
+            assert!((inner.parent_limit - inner.parent_offset) / PAGE_SIZE > page_idx);
+            let parent_idx = page_idx + inner.parent_offset / PAGE_SIZE;
+            return inner.parent.as_ref().unwrap().commit_page_internal(
+                parent_idx,
+                flags,
+                &inner.self_ref,
+            );
+        }
         // special case
         let no_parent = inner.parent.is_none();
         let no_frame = !inner.frames.contains_key(&page_idx);
@@ -363,7 +420,7 @@
         }
         // lazy allocate zero frame
         let target_frame = PhysFrame::alloc().ok_or(ZxError::NO_MEMORY)?;
-        kernel_hal::frame_zero(target_frame.addr());
+        kernel_hal::frame_zero_in_range(target_frame.addr(), 0, PAGE_SIZE);
         if out_of_range {
             // can never be a hidden vmo
             assert!(!inner.type_.is_hidden());
@@ -534,7 +591,7 @@ impl VMObjectPagedInner {
     /// Count committed pages of the VMO.
     fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize {
         assert!(
-            start_idx < self.size / PAGE_SIZE,
+            start_idx < self.size / PAGE_SIZE || start_idx == 0,
             "start_idx {:#x}, self.size {:#x}",
            start_idx,
            self.size
@@ -564,6 +621,9 @@
                     break;
                 }
             }
+            if inner.user_id != self.user_id {
+                break;
+            }
             current_idx += inner.parent_offset / PAGE_SIZE;
             if current_idx >= inner.parent_limit / PAGE_SIZE {
                 break;
             }
@@ -584,6 +644,10 @@
     ///    ^remove
     /// ```
     fn remove_child(&mut self, child: &WeakRef) {
+        if !self.type_.is_hidden() {
+            self.children.retain(|ch| !child.ptr_eq(ch));
+            return;
+        }
         let (tag, other_child) = self.type_.get_tag_and_other(child);
         let arc_child = other_child.upgrade().unwrap();
         let mut child = arc_child.inner.lock();
@@ -613,6 +677,47 @@
         child.parent = self.parent.take();
     }
 
+    /// Create a slice child VMO
+    fn create_slice(&mut self, offset: usize, len: usize, user_id: KoID) -> Arc<VMObjectPaged> {
+        let (parent, parent_offset, parent_limit) = if self.type_.is_slice() {
+            (
+                self.parent.clone(),
+                offset + self.parent_offset,
+                (offset + len).min(self.size) + self.parent_offset,
+            )
+        } else {
+            (
+                Some(self.self_ref.upgrade().unwrap()),
+                offset,
+                (offset + len).min(self.size),
+            )
+        };
+        let child = VMObjectPaged::wrap(VMObjectPagedInner {
+            user_id,
+            type_: VMOType::Slice,
+            parent,
+            parent_offset,
+            parent_limit,
+            size: len,
+            frames: BTreeMap::new(),
+            mappings: Vec::new(),
+            cache_policy: CachePolicy::Cached,
+            self_ref: Default::default(),
+            children: Vec::new(),
+        });
+        let mut inner = child.inner.lock();
+        inner.self_ref = Arc::downgrade(&child);
+        if self.type_.is_slice() {
+            let parent = self.parent.as_ref().unwrap();
+            let mut locked_parent = parent.inner.lock();
+            locked_parent.children.push(inner.self_ref.clone());
+        } else {
+            self.children.push(inner.self_ref.clone());
+        }
+        drop(inner);
+        child
+    }
+
     /// Create a snapshot child VMO.
     fn create_child(&mut self, offset: usize, len: usize, user_id: KoID) -> Arc<VMObjectPaged> {
         // create child VMO
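A test in the spirit of the existing unit tests could pin down the aliasing behavior that distinguishes a slice from a snapshot child. This sketch is hypothetical, not part of the patch, and assumes `VmObject`'s `read`/`write` helpers:

```rust
// Hypothetical test: a slice child aliases its parent's pages rather
// than snapshotting them, so writes are visible through both handles.
#[test]
fn slice_child_aliases_parent() {
    let parent = VmObject::new_paged(2);
    // is_slice = true, resizable = false, window = the second page
    let slice = parent.create_child(true, false, PAGE_SIZE, PAGE_SIZE);

    // a write through the slice at offset 0 ...
    slice.write(0, &[0xAA]).unwrap();

    // ... lands on the parent's page 1, per the slice's parent_offset
    let mut buf = [0u8; 1];
    parent.read(PAGE_SIZE, &mut buf).unwrap();
    assert_eq!(buf[0], 0xAA);
}
```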
@@ -627,6 +732,7 @@
             mappings: Vec::new(),
             cache_policy: CachePolicy::Cached,
             self_ref: Default::default(),
+            children: Vec::new(),
         });
         // construct a hidden VMO as shared parent
         let hidden = VMObjectPaged::wrap(VMObjectPagedInner {
@@ -643,6 +749,7 @@
             mappings: Vec::new(),
             cache_policy: CachePolicy::Cached,
             self_ref: Default::default(),
+            children: Vec::new(),
         });
         // update parent's child
         if let Some(parent) = self.parent.take() {
@@ -690,47 +797,50 @@
         let mut child = self.self_ref.clone();
         while let Some(parent) = option_parent {
             let mut locked_parent = parent.inner.lock();
-            if locked_parent.user_id == self.user_id {
-                let (tag, other) = locked_parent.type_.get_tag_and_other(&child);
-                let arc_other = other.upgrade().unwrap();
-                let mut locked_other = arc_other.inner.lock();
-                let start = locked_other.parent_offset / PAGE_SIZE;
-                let end = locked_other.parent_limit / PAGE_SIZE;
-                for _ in 0..unwanted.len() {
-                    let idx = unwanted.pop_front().unwrap();
-                    // if the frame is in locked_other's range, check if it can be move to locked_other
-                    if start <= idx && idx < end {
-                        if locked_parent.frames.contains_key(&idx) {
-                            let mut to_insert = locked_parent.frames.remove(&idx).unwrap();
-                            if to_insert.tag != tag.negate() {
-                                to_insert.tag = PageStateTag::Owned;
-                                locked_other.frames.insert(idx - start, to_insert);
-                            }
+            let (tag, other) = locked_parent.type_.get_tag_and_other(&child);
+            let arc_other = other.upgrade().unwrap();
+            let mut locked_other = arc_other.inner.lock();
+            let start = locked_other.parent_offset / PAGE_SIZE;
+            let end = locked_other.parent_limit / PAGE_SIZE;
+            for _ in 0..unwanted.len() {
+                let idx = unwanted.pop_front().unwrap();
+                // if the frame is in locked_other's range, check if it can be move to locked_other
+                if start <= idx && idx < end {
+                    if locked_parent.frames.contains_key(&idx) {
+                        let mut to_insert = locked_parent.frames.remove(&idx).unwrap();
+                        if to_insert.tag != tag.negate() {
+                            to_insert.tag = PageStateTag::Owned;
+                            locked_other.frames.insert(idx - start, to_insert);
                         }
+                        unwanted.push_back(idx + locked_parent.parent_offset / PAGE_SIZE);
+                    }
+                } else {
+                    // otherwise, if it exists in our frames, remove it; if not, push_back it again
+                    if locked_parent.frames.contains_key(&idx) {
+                        locked_parent.frames.remove(&idx);
                     } else {
-                        // otherwise, if it exists in our frames, remove it; if not, push_back it again
-                        if locked_parent.frames.contains_key(&idx) {
-                            locked_parent.frames.remove(&idx);
-                        } else {
-                            unwanted.push_back(idx + locked_parent.parent_offset / PAGE_SIZE);
-                        }
+                        unwanted.push_back(idx + locked_parent.parent_offset / PAGE_SIZE);
                     }
                 }
-                child = locked_parent.self_ref.clone();
-                option_parent = locked_parent.parent.clone();
-                drop(locked_parent);
-            } else {
-                break;
             }
+            child = locked_parent.self_ref.clone();
+            option_parent = locked_parent.parent.clone();
+            drop(locked_parent);
         }
     }
 
     fn resize(&mut self, new_size: usize) {
-        if new_size < self.size {
+        if new_size == 0 && new_size < self.size {
+            self.frames.clear();
+            if let Some(parent) = self.parent.as_ref() {
+                parent.inner.lock().remove_child(&self.self_ref);
+                self.parent = None;
+            }
+        } else if new_size < self.size {
             let mut unwanted = VecDeque::<usize>::new();
             let parent_end = (self.parent_limit - self.parent_offset) / PAGE_SIZE;
             for i in new_size / PAGE_SIZE..self.size / PAGE_SIZE {
-                if self.frames.remove(&i).is_none() && parent_end > i {
+                if parent_end > i {
                     unwanted.push_back(i);
                 }
             }
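In `release_unwanted_pages` above, a frame the dying child no longer wants is either handed to the surviving sibling, re-keyed from the hidden parent's page index into the sibling's window, or propagated up the ancestor chain. A minimal standalone sketch of just the re-keying step (names hypothetical, `u64` standing in for `PhysFrame`):

```rust
use std::collections::BTreeMap;

/// Move every frame of `parent` that falls inside the child's window
/// [start, end) (in parent pages) into `child`, re-keyed to child pages.
fn migrate_frames(
    parent: &mut BTreeMap<usize, u64>, // page index -> frame
    child: &mut BTreeMap<usize, u64>,
    start: usize,
    end: usize,
) {
    let in_window: Vec<usize> = parent.range(start..end).map(|(&idx, _)| idx).collect();
    for idx in in_window {
        let frame = parent.remove(&idx).unwrap();
        // rebase: parent page `idx` becomes child page `idx - start`
        child.insert(idx - start, frame);
    }
}

fn main() {
    let mut parent: BTreeMap<usize, u64> = [(2, 0xaaaa), (5, 0xbbbb)].into_iter().collect();
    let mut child = BTreeMap::new();
    migrate_frames(&mut parent, &mut child, 2, 4);
    assert_eq!(child.get(&0), Some(&0xaaaa)); // parent page 2 -> child page 0
    assert!(parent.get(&2).is_none());
    assert_eq!(parent.get(&5), Some(&0xbbbb)); // outside the window, untouched
}
```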
@@ -774,7 +884,7 @@ mod tests {
     #[test]
     fn create_child() {
         let vmo = VmObject::new_paged(1);
-        let child_vmo = vmo.create_child(false, 0, PAGE_SIZE);
+        let child_vmo = vmo.create_child(false, false, 0, PAGE_SIZE);
 
         // write to parent and make sure clone doesn't see it
         vmo.test_write(0, 1);
@@ -791,8 +901,8 @@
     #[ignore] // FIXME
     fn zero_page_write() {
         let vmo0 = VmObject::new_paged(1);
-        let vmo1 = vmo0.create_child(false, 0, PAGE_SIZE);
-        let vmo2 = vmo0.create_child(false, 0, PAGE_SIZE);
+        let vmo1 = vmo0.create_child(false, false, 0, PAGE_SIZE);
+        let vmo2 = vmo0.create_child(false, false, 0, PAGE_SIZE);
         let vmos = [vmo0, vmo1, vmo2];
         let origin = vmo_page_bytes();
@@ -819,9 +929,9 @@
     fn overflow() {
         let vmo0 = VmObject::new_paged(2);
         vmo0.test_write(0, 1);
-        let vmo1 = vmo0.create_child(false, 0, 2 * PAGE_SIZE);
+        let vmo1 = vmo0.create_child(false, false, 0, 2 * PAGE_SIZE);
         vmo1.test_write(1, 2);
-        let vmo2 = vmo1.create_child(false, 0, 3 * PAGE_SIZE);
+        let vmo2 = vmo1.create_child(false, false, 0, 3 * PAGE_SIZE);
         vmo2.test_write(2, 3);
         assert_eq!(vmo0.get_info().committed_bytes as usize, PAGE_SIZE);
         assert_eq!(vmo1.get_info().committed_bytes as usize, PAGE_SIZE);
diff --git a/zircon-object/src/vm/vmo/physical.rs b/zircon-object/src/vm/vmo/physical.rs
index add956de7..71622aeaf 100644
--- a/zircon-object/src/vm/vmo/physical.rs
+++ b/zircon-object/src/vm/vmo/physical.rs
@@ -27,15 +27,18 @@ impl VMObjectPhysicalInner {
     }
 }
 
+impl Drop for VMObjectPhysical {
+    fn drop(&mut self) {
+        kernel_hal::dma_recycle(self.paddr, self.pages);
+    }
+}
+
 impl VMObjectPhysical {
     /// Create a new VMO representing a piece of contiguous physical memory.
-    ///
-    /// # Safety
-    ///
     /// You must ensure nobody has the ownership of this piece of memory yet.
-    #[allow(unsafe_code)]
-    pub unsafe fn new(paddr: PhysAddr, pages: usize) -> Arc<Self> {
+    pub fn new(paddr: PhysAddr, pages: usize) -> Arc<Self> {
         assert!(page_aligned(paddr));
+        assert!(kernel_hal::dma_check(paddr, pages));
         Arc::new(VMObjectPhysical {
             paddr,
             pages,
@@ -82,7 +85,13 @@
         Ok(())
     }
 
-    fn create_child(&self, _offset: usize, _len: usize, _user_id: KoID) -> Arc<dyn VMObjectTrait> {
+    fn create_child(
+        &self,
+        _is_slice: bool,
+        _offset: usize,
+        _len: usize,
+        _user_id: KoID,
+    ) -> Arc<dyn VMObjectTrait> {
         unimplemented!()
     }
 
@@ -128,6 +137,10 @@
     fn committed_pages_in_range(&self, _start_idx: usize, _end_idx: usize) -> usize {
         unimplemented!()
     }
+
+    fn zero(&self, _offset: usize, _len: usize) -> ZxResult {
+        unimplemented!()
+    }
 }
 
 #[cfg(test)]
@@ -138,7 +151,7 @@
     #[test]
     fn read_write() {
-        let vmo = unsafe { VmObject::new_physical(0x1000, 2) };
+        let vmo = VmObject::new_physical(0x1000, 2);
         let vmphy = vmo.inner.clone();
         assert_eq!(vmphy.get_cache_policy(), CachePolicy::Uncached);
         super::super::tests::read_write(&vmo);
diff --git a/zircon-syscall/src/vmo.rs b/zircon-syscall/src/vmo.rs
index 18113f2e9..f4085f8fd 100644
--- a/zircon-syscall/src/vmo.rs
+++ b/zircon-syscall/src/vmo.rs
@@ -2,9 +2,27 @@
 use {
     super::*,
     bitflags::bitflags,
     kernel_hal::CachePolicy,
+    numeric_enum_macro::numeric_enum,
     zircon_object::{resource::*, task::PolicyCondition, vm::*},
 };
 
+fn check_child_size(size: usize) -> ZxResult<usize> {
+    let new_size = if !page_aligned(size) {
+        if let Some(res) = size.checked_add(PAGE_SIZE) {
+            round_down_pages(res)
+        } else {
+            return Err(ZxError::OUT_OF_RANGE);
+        }
+    } else {
+        size
+    };
+    if new_size > 0xffff_ffff_fffe_0000 {
+        Err(ZxError::OUT_OF_RANGE)
+    } else {
+        Ok(new_size)
+    }
+}
+
 impl Syscall<'_> {
     pub fn sys_vmo_create(
         &self,
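`check_child_size` above rounds a misaligned size up to the next page boundary by adding a full page and rounding down, with `checked_add` turning requests near `usize::MAX` into `OUT_OF_RANGE` instead of overflowing, then caps the result at `0xffff_ffff_fffe_0000`. A self-contained restatement with the error type simplified (the constants mirror the patch; `round_down_pages` is assumed to mask off the low page bits):

```rust
const PAGE_SIZE: usize = 0x1000;
const MAX_VMO_SIZE: usize = 0xffff_ffff_fffe_0000;

fn round_down_pages(x: usize) -> usize {
    x & !(PAGE_SIZE - 1) // valid because PAGE_SIZE is a power of two
}

fn page_aligned(x: usize) -> bool {
    x % PAGE_SIZE == 0
}

// Re-statement of check_child_size for illustration only.
fn check_child_size(size: usize) -> Result<usize, &'static str> {
    let new_size = if !page_aligned(size) {
        // adding a page then rounding down rounds *up* to the next boundary
        size.checked_add(PAGE_SIZE).ok_or("OUT_OF_RANGE").map(round_down_pages)?
    } else {
        size
    };
    if new_size > MAX_VMO_SIZE {
        Err("OUT_OF_RANGE")
    } else {
        Ok(new_size)
    }
}

fn main() {
    assert_eq!(check_child_size(1), Ok(PAGE_SIZE));         // rounds up
    assert_eq!(check_child_size(PAGE_SIZE), Ok(PAGE_SIZE)); // already aligned
    assert!(check_child_size(usize::MAX).is_err());         // checked_add catches overflow
}
```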
@@ -116,9 +134,7 @@
             "vmo_create_child: handle={:#x}, options={:?}, offset={:#x}, size={:#x}",
             handle_value, options, offset, size
         );
-        if !options.contains(VmoCloneFlags::SNAPSHOT_AT_LEAST_ON_WRITE) {
-            return Err(ZxError::NOT_SUPPORTED);
-        }
+        let is_slice = options.contains(VmoCloneFlags::SLICE);
         let proc = self.thread.proc();
         let (vmo, parent_rights) = proc.get_object_and_rights::<VmObject>(handle_value)?;
@@ -139,16 +155,37 @@
             "parent_rights: {:?} child_rights: {:?}",
             parent_rights, child_rights
         );
-        let resizable = options.contains(VmoCloneFlags::RESIZABLE);
+        let resizable = if !is_slice {
+            options.contains(VmoCloneFlags::RESIZABLE)
+        } else if options.contains(VmoCloneFlags::RESIZABLE) {
+            return Err(ZxError::INVALID_ARGS);
+        } else {
+            false
+        };
 
-        let child_size = roundup_pages(size);
+        let child_size = check_child_size(size)?;
+        let parent_size = vmo.len();
         info!("size of child vmo: {:#x}", child_size);
-        let child_vmo = vmo.create_child(resizable, offset as usize, child_size);
+        if is_slice {
+            let check = if let Some(limit) = offset.checked_add(size) {
+                limit <= parent_size && offset < parent_size
+            } else {
+                false
+            };
+            if !check && size != 0 {
+                return Err(ZxError::INVALID_ARGS);
+            }
+            if vmo.is_resizable() {
+                return Err(ZxError::NOT_SUPPORTED);
+            }
+        } else if vmo.is_slice() {
+            return Err(ZxError::NOT_SUPPORTED);
+        }
+        let child_vmo = vmo.create_child(is_slice, resizable, offset, child_size);
         out.write(proc.add_handle(Handle::new(child_vmo, child_rights)))?;
         Ok(())
     }
 
-    #[allow(unsafe_code)]
     pub fn sys_vmo_create_physical(
         &self,
         rsrc: HandleValue,
@@ -170,7 +207,7 @@
         if paddr.overflowing_add(size).1 {
             return Err(ZxError::INVALID_ARGS);
         }
-        let vmo = unsafe { VmObject::new_physical(paddr, size / PAGE_SIZE) };
+        let vmo = VmObject::new_physical(paddr, size / PAGE_SIZE);
         let handle_value = proc.add_handle(Handle::new(vmo, Rights::DEFAULT_VMO | Rights::EXECUTE));
         out.write(handle_value)?;
         Ok(())
@@ -201,25 +238,35 @@
             "vmo.op_range: handle={:#x}, op={:#X}, offset={:#x}, len={:#x}, buffer_size={:#x}",
             handle_value, op, offset, len, _buffer_size,
         );
+        let op = VmoOpType::try_from(op).or(Err(ZxError::INVALID_ARGS))?;
         let proc = self.thread.proc();
         let (vmo, rights) = proc.get_object_and_rights::<VmObject>(handle_value)?;
-        if !page_aligned(offset) || !page_aligned(len) {
-            return Err(ZxError::INVALID_ARGS);
-        }
         match op {
-            VMO_OP_COMMIT => {
+            VmoOpType::Commit => {
                 if !rights.contains(Rights::WRITE) {
                     return Err(ZxError::ACCESS_DENIED);
                 }
+                if !page_aligned(offset) || !page_aligned(len) {
+                    return Err(ZxError::INVALID_ARGS);
+                }
                 vmo.commit(offset, len)?;
                 Ok(())
             }
-            VMO_OP_DECOMMIT => {
+            VmoOpType::Decommit => {
                 if !rights.contains(Rights::WRITE) {
                     return Err(ZxError::ACCESS_DENIED);
                 }
+                if !page_aligned(offset) || !page_aligned(len) {
+                    return Err(ZxError::INVALID_ARGS);
+                }
                 vmo.decommit(offset, len)
             }
+            VmoOpType::Zero => {
+                if !rights.contains(Rights::WRITE) {
+                    return Err(ZxError::ACCESS_DENIED);
+                }
+                vmo.zero(offset, len)
+            }
             _ => unimplemented!(),
         }
     }
@@ -243,6 +290,18 @@ bitflags! {
     }
 }
 
-/// VMO Opcodes (for vmo_op_range)
-const VMO_OP_COMMIT: u32 = 1;
-const VMO_OP_DECOMMIT: u32 = 2;
+numeric_enum! {
+    #[repr(u32)]
+    /// VMO Opcodes (for vmo_op_range)
+    pub enum VmoOpType {
+        Commit = 1,
+        Decommit = 2,
+        Lock = 3,
+        Unlock = 4,
+        CacheSync = 6,
+        CacheInvalidate = 7,
+        CacheClean = 8,
+        CacheCleanInvalidate = 9,
+        Zero = 10,
+    }
+}
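`numeric_enum!` (from the `numeric_enum_macro` crate the patch imports) derives a `TryFrom<u32>` impl, so an unknown opcode fails the conversion up front and the syscall returns `INVALID_ARGS` before any rights checks run. A minimal sketch of the same pattern:

```rust
use std::convert::TryFrom;
use numeric_enum_macro::numeric_enum;

numeric_enum! {
    #[repr(u32)]
    #[derive(Debug, PartialEq)]
    pub enum VmoOpType {
        Commit = 1,
        Decommit = 2,
        Zero = 10,
    }
}

fn main() {
    assert_eq!(VmoOpType::try_from(10), Ok(VmoOpType::Zero));
    // 5 is unassigned, so the conversion fails; the syscall maps this
    // to ZxError::INVALID_ARGS via `.or(Err(...))?`
    assert!(VmoOpType::try_from(5).is_err());
}
```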