diff --git a/src/intptrcast.rs b/src/intptrcast.rs
index 618cf9df7f..dcb1879042 100644
--- a/src/intptrcast.rs
+++ b/src/intptrcast.rs
@@ -162,11 +162,14 @@ impl<'mir, 'tcx> GlobalStateInner {
         Ok(Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)))
     }
 
-    fn alloc_base_addr(ecx: &MiriInterpCx<'mir, 'tcx>, alloc_id: AllocId) -> u64 {
+    fn alloc_base_addr(
+        ecx: &MiriInterpCx<'mir, 'tcx>,
+        alloc_id: AllocId,
+    ) -> InterpResult<'tcx, u64> {
         let mut global_state = ecx.machine.intptrcast.borrow_mut();
         let global_state = &mut *global_state;
 
-        match global_state.base_addr.entry(alloc_id) {
+        Ok(match global_state.base_addr.entry(alloc_id) {
             Entry::Occupied(entry) => *entry.get(),
             Entry::Vacant(entry) => {
                 // There is nothing wrong with a raw pointer being cast to an integer only after
@@ -181,7 +184,10 @@ impl<'mir, 'tcx> GlobalStateInner {
                     rng.gen_range(0..16)
                 };
                 // From next_base_addr + slack, round up to adjust for alignment.
-                let base_addr = global_state.next_base_addr.checked_add(slack).unwrap();
+                let base_addr = global_state
+                    .next_base_addr
+                    .checked_add(slack)
+                    .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
                 let base_addr = Self::align_addr(base_addr, align.bytes());
                 entry.insert(base_addr);
                 trace!(
@@ -197,24 +203,33 @@ impl<'mir, 'tcx> GlobalStateInner {
                 // of at least 1 to avoid two allocations having the same base address.
                 // (The logic in `alloc_id_from_addr` assumes unique addresses, and different
                 // function/vtable pointers need to be distinguishable!)
-                global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
+                global_state.next_base_addr = base_addr
+                    .checked_add(max(size.bytes(), 1))
+                    .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+                // Even if `Size` didn't overflow, we might still have filled up the address space.
+                if global_state.next_base_addr > ecx.machine_usize_max() {
+                    throw_exhaust!(AddressSpaceFull);
+                }
                 // Given that `next_base_addr` increases in each allocation, pushing the
                 // corresponding tuple keeps `int_to_ptr_map` sorted
                 global_state.int_to_ptr_map.push((base_addr, alloc_id));
 
                 base_addr
             }
-        }
+        })
     }
 
     /// Convert a relative (tcx) pointer to an absolute address.
-    pub fn rel_ptr_to_addr(ecx: &MiriInterpCx<'mir, 'tcx>, ptr: Pointer<AllocId>) -> u64 {
+    pub fn rel_ptr_to_addr(
+        ecx: &MiriInterpCx<'mir, 'tcx>,
+        ptr: Pointer<AllocId>,
+    ) -> InterpResult<'tcx, u64> {
         let (alloc_id, offset) = ptr.into_parts(); // offset is relative (AllocId provenance)
-        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);
+        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id)?;
 
         // Add offset with the right kind of pointer-overflowing arithmetic.
         let dl = ecx.data_layout();
-        dl.overflowing_offset(base_addr, offset.bytes()).0
+        Ok(dl.overflowing_offset(base_addr, offset.bytes()).0)
     }
 
     /// When a pointer is used for a memory access, this computes where in which allocation the
@@ -232,7 +247,9 @@ impl<'mir, 'tcx> GlobalStateInner {
             GlobalStateInner::alloc_id_from_addr(ecx, addr.bytes())?
         };
 
-        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);
+        // This cannot fail: since we already have a pointer with that provenance, rel_ptr_to_addr
+        // must have been called in the past.
+        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id).unwrap();
 
         // Wrapping "addr - base_addr"
         let dl = ecx.data_layout();
diff --git a/src/machine.rs b/src/machine.rs
index 01a3d7550e..8e44d4d7ad 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -971,7 +971,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
     fn adjust_alloc_base_pointer(
         ecx: &MiriInterpCx<'mir, 'tcx>,
         ptr: Pointer<AllocId>,
-    ) -> Pointer<Provenance> {
+    ) -> InterpResult<'tcx, Pointer<Provenance>> {
         if cfg!(debug_assertions) {
             // The machine promises to never call us on thread-local or extern statics.
             let alloc_id = ptr.provenance;
@@ -985,17 +985,17 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
                 _ => {}
             }
         }
-        let absolute_addr = intptrcast::GlobalStateInner::rel_ptr_to_addr(ecx, ptr);
+        let absolute_addr = intptrcast::GlobalStateInner::rel_ptr_to_addr(ecx, ptr)?;
         let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
             borrow_tracker.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine)
         } else {
             // Value does not matter, SB is disabled
             BorTag::default()
         };
-        Pointer::new(
+        Ok(Pointer::new(
             Provenance::Concrete { alloc_id: ptr.provenance, tag },
             Size::from_bytes(absolute_addr),
-        )
+        ))
     }
 
     #[inline(always)]
diff --git a/src/shims/backtrace.rs b/src/shims/backtrace.rs
index 15987eee53..ed1c6ebfec 100644
--- a/src/shims/backtrace.rs
+++ b/src/shims/backtrace.rs
@@ -190,9 +190,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             0 => {
                 // These are "mutable" allocations as we consider them to be owned by the callee.
                 let name_alloc =
-                    this.allocate_str(&name, MiriMemoryKind::Rust.into(), Mutability::Mut);
+                    this.allocate_str(&name, MiriMemoryKind::Rust.into(), Mutability::Mut)?;
                 let filename_alloc =
-                    this.allocate_str(&filename, MiriMemoryKind::Rust.into(), Mutability::Mut);
+                    this.allocate_str(&filename, MiriMemoryKind::Rust.into(), Mutability::Mut)?;
 
                 this.write_immediate(
                     name_alloc.to_ref(this),
diff --git a/src/shims/panic.rs b/src/shims/panic.rs
index db3e42faca..0ea1137200 100644
--- a/src/shims/panic.rs
+++ b/src/shims/panic.rs
@@ -172,7 +172,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
 
         // First arg: message.
-        let msg = this.allocate_str(msg, MiriMemoryKind::Machine.into(), Mutability::Not);
+        let msg = this.allocate_str(msg, MiriMemoryKind::Machine.into(), Mutability::Not)?;
 
         // Call the lang item.
         let panic = this.tcx.lang_items().panic_fn().unwrap();
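Not part of the diff: below is a minimal, self-contained sketch of the fallible address-bump pattern the change above introduces (checked arithmetic plus an explicit address-space bound instead of `.unwrap()`). The `AddressSpaceFull` struct and `bump_base_addr` helper are hypothetical stand-ins; the real code reports the condition through Miri's `InterpResult` via `err_exhaust!` / `throw_exhaust!` and also rounds the base address up for alignment.

```rust
use std::cmp::max;

/// Hypothetical stand-in for Miri's `AddressSpaceFull` resource-exhaustion error.
#[derive(Debug, PartialEq)]
struct AddressSpaceFull;

/// Sketch of one bump-allocation step made fallible: overflow of the
/// base-address arithmetic becomes an error instead of a panic.
fn bump_base_addr(
    next_base_addr: u64,
    slack: u64,
    size: u64,
    addr_max: u64,
) -> Result<u64, AddressSpaceFull> {
    // Start from next_base_addr + slack (the real code also aligns this).
    let base_addr = next_base_addr.checked_add(slack).ok_or(AddressSpaceFull)?;
    // Reserve at least 1 byte so two allocations never share a base address.
    let new_next = base_addr.checked_add(max(size, 1)).ok_or(AddressSpaceFull)?;
    // Even if the u64 addition didn't overflow, the target address space may be smaller.
    if new_next > addr_max {
        return Err(AddressSpaceFull);
    }
    Ok(new_next)
}

fn main() {
    // 0x1000 + 8 slack + 32 bytes of allocation => next base address 0x1028.
    assert_eq!(bump_base_addr(0x1000, 8, 32, u64::MAX), Ok(0x1028));
    // Near the top of the address space, the same step now fails cleanly.
    assert_eq!(bump_base_addr(u64::MAX - 4, 8, 16, u64::MAX), Err(AddressSpaceFull));
}
```

Propagating the error this way is what forces the signature changes further down the diff: every caller of `alloc_base_addr` (and transitively `rel_ptr_to_addr`, `adjust_alloc_base_pointer`, `allocate_str`) either returns `InterpResult` and uses `?`, or, as in `abs_ptr_to_rel`, can justify an `unwrap()` because the address must already have been assigned.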