Auto merge of #69999 - RalfJung:miri-unwind, r=oli-obk
adjust Miri to needs of changed unwinding strategy

As expected, #67502 broke unwinding in Miri. To fix it we have to adjust parts of the engine and the panic runtime, which this PR does. The Miri-side changes are in rust-lang/miri#1227.

Cc @oli-obk @Aaron1011 @Mark-Simulacrum @Amanieu
bors committed Mar 14, 2020
2 parents 131772c + b450e1b commit 7cdbc87
Showing 8 changed files with 98 additions and 50 deletions.
8 changes: 5 additions & 3 deletions src/libcore/intrinsics.rs
@@ -1892,10 +1892,12 @@ extern "rust-intrinsic" {
pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;

/// Internal hook used by Miri to implement unwinding.
/// Compiles to a NOP during non-Miri codegen.
/// ICEs when encountered during non-Miri codegen.
///
/// Perma-unstable: do not use
pub fn miri_start_panic(data: *mut (dyn crate::any::Any + crate::marker::Send)) -> ();
/// The `payload` ptr here will be exactly the one `do_catch` gets passed by `try`.
///
/// Perma-unstable: do not use.
pub fn miri_start_panic(payload: *mut u8) -> !;
}

// Some functions are defined here because they accidentally got made
34 changes: 21 additions & 13 deletions src/libpanic_unwind/lib.rs
@@ -30,6 +30,8 @@
#![feature(raw)]
#![panic_runtime]
#![feature(panic_runtime)]
// `real_imp` is unused with Miri, so silence warnings.
#![cfg_attr(miri, allow(dead_code))]

use alloc::boxed::Box;
use core::any::Any;
@@ -38,25 +40,38 @@ use core::panic::BoxMeUp;
cfg_if::cfg_if! {
if #[cfg(target_os = "emscripten")] {
#[path = "emcc.rs"]
mod imp;
mod real_imp;
} else if #[cfg(target_arch = "wasm32")] {
#[path = "dummy.rs"]
mod imp;
mod real_imp;
} else if #[cfg(target_os = "hermit")] {
#[path = "hermit.rs"]
mod imp;
mod real_imp;
} else if #[cfg(all(target_env = "msvc", target_arch = "aarch64"))] {
#[path = "dummy.rs"]
mod imp;
mod real_imp;
} else if #[cfg(target_env = "msvc")] {
#[path = "seh.rs"]
mod imp;
mod real_imp;
} else {
// Rust runtime's startup objects depend on these symbols, so make them public.
#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
pub use imp::eh_frame_registry::*;
pub use real_imp::eh_frame_registry::*;
#[path = "gcc.rs"]
mod real_imp;
}
}

cfg_if::cfg_if! {
if #[cfg(miri)] {
// Use the Miri runtime.
// We still need to also load the normal runtime above, as rustc expects certain lang
// items from there to be defined.
#[path = "miri.rs"]
mod imp;
} else {
// Use the real runtime.
use real_imp as imp;
}
}
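
For readers unfamiliar with this pattern: the first `cfg_if!` picks a platform backend under the name `real_imp`, and the second either swaps in the Miri backend or re-exports the platform one as `imp`. A minimal self-contained sketch of the same two-stage selection, using inline modules and illustrative backend names instead of the real `#[path]` files (assumes the `cfg_if` crate as a dependency):

cfg_if::cfg_if! {
    if #[cfg(unix)] {
        mod real_imp {
            pub fn which() -> &'static str { "unix backend" }
        }
    } else {
        mod real_imp {
            pub fn which() -> &'static str { "fallback backend" }
        }
    }
}

cfg_if::cfg_if! {
    if #[cfg(miri)] {
        mod imp {
            pub fn which() -> &'static str { "miri backend" }
        }
    } else {
        // Route every caller through the `imp` name while keeping the
        // platform module compiled, mirroring the crate above.
        use real_imp as imp;
    }
}

fn main() {
    println!("selected: {}", imp::which());
}

Under `cfg(miri)` the platform module compiles but goes unused, which is what the `#![cfg_attr(miri, allow(dead_code))]` attribute added above silences.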

@@ -81,12 +96,5 @@ pub unsafe extern "C" fn __rust_start_panic(payload: usize) -> u32 {
let payload = payload as *mut &mut dyn BoxMeUp;
let payload = (*payload).take_box();

// Miri panic support: cfg'd out of normal builds just to be sure.
// When going through normal codegen, `miri_start_panic` is a NOP, so the
// Miri-enabled sysroot still supports normal unwinding. But when executed in
// Miri, this line initiates unwinding.
#[cfg(miri)]
core::intrinsics::miri_start_panic(payload);

imp::panic(Box::from_raw(payload))
}
20 changes: 20 additions & 0 deletions src/libpanic_unwind/miri.rs
@@ -0,0 +1,20 @@
//! Unwinding panics for Miri.
use alloc::boxed::Box;
use core::any::Any;

// The type of the payload that the Miri engine propagates through unwinding for us.
// Must be pointer-sized.
type Payload = Box<Box<dyn Any + Send>>;

pub unsafe fn panic(payload: Box<dyn Any + Send>) -> u32 {
// The payload we pass to `miri_start_panic` will be exactly the argument we get
// in `cleanup` below. So we just box it up once, to get something pointer-sized.
let payload_box: Payload = Box::new(payload);
core::intrinsics::miri_start_panic(Box::into_raw(payload_box) as *mut u8)
}

pub unsafe fn cleanup(payload_box: *mut u8) -> Box<dyn Any + Send> {
// Recover the underlying `Box`.
let payload_box: Payload = Box::from_raw(payload_box as *mut _);
*payload_box
}
26 changes: 26 additions & 0 deletions src/librustc/mir/interpret/value.rs
@@ -272,11 +272,13 @@ impl<'tcx, Tag> Scalar<Tag> {

#[inline]
pub fn from_bool(b: bool) -> Self {
// Guaranteed to be truncated and does not need sign extension.
Scalar::Raw { data: b as u128, size: 1 }
}

#[inline]
pub fn from_char(c: char) -> Self {
// Guaranteed to be truncated and does not need sign extension.
Scalar::Raw { data: c as u128, size: 4 }
}

@@ -299,21 +301,25 @@ impl<'tcx, Tag> Scalar<Tag> {

#[inline]
pub fn from_u8(i: u8) -> Self {
// Guaranteed to be truncated and does not need sign extension.
Scalar::Raw { data: i as u128, size: 1 }
}

#[inline]
pub fn from_u16(i: u16) -> Self {
// Guaranteed to be truncated and does not need sign extension.
Scalar::Raw { data: i as u128, size: 2 }
}

#[inline]
pub fn from_u32(i: u32) -> Self {
// Guaranteed to be truncated and does not need sign extension.
Scalar::Raw { data: i as u128, size: 4 }
}

#[inline]
pub fn from_u64(i: u64) -> Self {
// Guaranteed to be truncated and does not need sign extension.
Scalar::Raw { data: i as u128, size: 8 }
}

@@ -341,6 +347,26 @@ impl<'tcx, Tag> Scalar<Tag> {
.unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
}

#[inline]
pub fn from_i8(i: i8) -> Self {
Self::from_int(i, Size::from_bits(8))
}

#[inline]
pub fn from_i16(i: i16) -> Self {
Self::from_int(i, Size::from_bits(16))
}

#[inline]
pub fn from_i32(i: i32) -> Self {
Self::from_int(i, Size::from_bits(32))
}

#[inline]
pub fn from_i64(i: i64) -> Self {
Self::from_int(i, Size::from_bits(64))
}

#[inline]
pub fn from_machine_isize(i: i64, cx: &impl HasDataLayout) -> Self {
Self::from_int(i, cx.data_layout().pointer_size)
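
The new `from_i*` constructors all funnel into `from_int`, which truncates the value to the target width and uses sign extension to check that nothing was lost. A standalone sketch of that arithmetic, with local stand-ins for the interpreter's truncate/sign-extend helpers (not the real rustc functions):

fn truncate(value: u128, bits: u32) -> u128 {
    // Keep only the low `bits` bits, the form stored in `Scalar::Raw`.
    if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
}

fn sign_extend(value: u128, bits: u32) -> u128 {
    // Shift the sign bit of the `bits`-wide value up to bit 127, then
    // arithmetic-shift back down to replicate it.
    let shift = 128 - bits;
    (((value << shift) as i128) >> shift) as u128
}

fn from_int(i: i64, bits: u32) -> u128 {
    let v = i as i128 as u128;
    let truncated = truncate(v, bits);
    // `from_int` calls `bug!` when this round trip loses information.
    assert!(sign_extend(truncated, bits) == v,
            "signed value {:#x} does not fit in {} bits", i, bits);
    truncated
}

fn main() {
    assert_eq!(from_int(-1, 8), 0xff); // i8 -1 is stored as an all-ones byte
    assert_eq!(from_int(127, 8), 0x7f);
    // Unsigned casts zero-extend, so the `from_u*` constructors above are
    // already truncated and need no such check:
    assert_eq!(0xffu8 as u128, 0xff);
    // from_int(128, 8) would trip the assertion: 128 does not fit in i8.
}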
31 changes: 12 additions & 19 deletions src/librustc_mir/interpret/eval_context.rs
@@ -21,7 +21,7 @@ use rustc_span::source_map::{self, Span, DUMMY_SP};

use super::{
Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
ScalarMaybeUndef, StackPopInfo,
ScalarMaybeUndef, StackPopJump,
};

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
@@ -623,31 +623,24 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

::log_settings::settings().indentation -= 1;
let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
let stack_pop_info = M::stack_pop(self, frame.extra, unwinding)?;
if let (false, StackPopInfo::StopUnwinding) = (unwinding, stack_pop_info) {
bug!("Attempted to stop unwinding while there is no unwinding!");
}

// Now where do we jump next?

// Determine if we leave this function normally or via unwinding.
let cur_unwinding =
if let StackPopInfo::StopUnwinding = stack_pop_info { false } else { unwinding };

// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
// In that case, we return early. We also avoid validation in that case,
// because this is CTFE and the final value will be thoroughly validated anyway.
let (cleanup, next_block) = match frame.return_to_block {
StackPopCleanup::Goto { ret, unwind } => {
(true, Some(if cur_unwinding { unwind } else { ret }))
(true, Some(if unwinding { unwind } else { ret }))
}
StackPopCleanup::None { cleanup, .. } => (cleanup, None),
};

if !cleanup {
assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
// Leak the locals, skip validation.
assert!(!unwinding, "tried to skip cleanup during unwinding");
// Leak the locals, skip validation, skip machine hook.
return Ok(());
}

@@ -656,13 +649,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.deallocate_local(local.value)?;
}

trace!(
"StackPopCleanup: {:?} StackPopInfo: {:?} cur_unwinding = {:?}",
frame.return_to_block,
stack_pop_info,
cur_unwinding
);
if cur_unwinding {
if M::stack_pop(self, frame.extra, unwinding)? == StackPopJump::NoJump {
// The hook already did everything.
// We want to skip the `info!` below, hence early return.
return Ok(());
}
// Normal return.
if unwinding {
// Follow the unwind edge.
let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
self.unwind_to_block(unwind);
@@ -697,7 +690,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
"CONTINUING({}) {} (unwinding = {})",
self.cur_frame(),
self.frame().instance,
cur_unwinding
unwinding
);
}

12 changes: 6 additions & 6 deletions src/librustc_mir/interpret/machine.rs
@@ -17,16 +17,16 @@ use super::{
/// Data returned by Machine::stack_pop,
/// to provide further control over the popping of the stack frame
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum StackPopInfo {
pub enum StackPopJump {
/// Indicates that no special handling should be
/// done - we'll either return normally or unwind
/// based on the terminator for the function
/// we're leaving.
Normal,

/// Indicates that we should stop unwinding,
/// as we've reached a catch frame
StopUnwinding,
/// Indicates that we should *not* jump to the return/unwind address, as the callback already
/// took care of everything.
NoJump,
}

/// Whether this kind of memory is allowed to leak
@@ -276,9 +276,9 @@ pub trait Machine<'mir, 'tcx>: Sized {
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_extra: Self::FrameExtra,
_unwinding: bool,
) -> InterpResult<'tcx, StackPopInfo> {
) -> InterpResult<'tcx, StackPopJump> {
// By default, we do not support unwinding from panics
Ok(StackPopInfo::Normal)
Ok(StackPopJump::Normal)
}

fn int_to_ptr(
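
A toy model of the renamed hook's contract, using simplified types rather than rustc's real `Machine` trait: after popping a frame, the interpreter asks the machine, and only follows the return/unwind edge when the answer is `Normal`. A Miri-like machine answers `NoJump` once unwinding reaches its recorded catch frame:

#[derive(PartialEq, Debug)]
enum StackPopJump {
    Normal,
    NoJump,
}

trait Machine {
    // Default machines (e.g. CTFE) never intercept a pop.
    fn stack_pop(&mut self, _unwinding: bool) -> StackPopJump {
        StackPopJump::Normal
    }
}

// A Miri-like machine that records the frame depth of a catch handler.
struct MiriLike {
    catch_frame: Option<usize>,
    depth: usize,
}

impl Machine for MiriLike {
    fn stack_pop(&mut self, unwinding: bool) -> StackPopJump {
        self.depth -= 1;
        if unwinding && self.catch_frame == Some(self.depth) {
            // Unwinding reached the frame that called `try`: divert to the
            // catch handler instead of following the unwind edge.
            self.catch_frame = None;
            return StackPopJump::NoJump;
        }
        StackPopJump::Normal
    }
}

fn main() {
    let mut m = MiriLike { catch_frame: Some(1), depth: 3 };
    assert_eq!(m.stack_pop(true), StackPopJump::Normal); // depth 3 -> 2
    assert_eq!(m.stack_pop(true), StackPopJump::NoJump); // depth 2 -> 1: caught
}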
2 changes: 1 addition & 1 deletion src/librustc_mir/interpret/mod.rs
@@ -24,7 +24,7 @@ pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};

pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};

pub use self::machine::{AllocMap, Machine, MayLeak, StackPopInfo};
pub use self::machine::{AllocMap, Machine, MayLeak, StackPopJump};

pub use self::operand::{ImmTy, Immediate, OpTy, Operand, ScalarMaybeUndef};

15 changes: 7 additions & 8 deletions src/libstd/panicking.rs
@@ -251,21 +251,20 @@ pub unsafe fn r#try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>>
//
// We go through a transition where:
//
// * First, we set the data to be the closure that we're going to call.
// * First, we set the data field `f` to be the argumentless closure that we're going to call.
// * When we make the function call, the `do_call` function below, we take
// ownership of the function pointer. At this point the `Data` union is
// ownership of the function pointer. At this point the `data` union is
// entirely uninitialized.
// * If the closure successfully returns, we write the return value into the
// data's return slot. Note that `ptr::write` is used as it's overwriting
// uninitialized data.
// data's return slot (field `r`).
// * If the closure panics (`do_catch` below), we write the panic payload into field `p`.
// * Finally, when we come back out of the `try` intrinsic we're
// in one of two states:
//
// 1. The closure didn't panic, in which case the return value was
// filled in. We move it out of `data` and return it.
// 2. The closure panicked, in which case the return value wasn't
// filled in. In this case the entire `data` union is invalid, so
// there is no need to drop anything.
// filled in. We move it out of `data.r` and return it.
// 2. The closure panicked, in which case the panic payload was
// filled in. We move it out of `data.p` and return it.
//
// Once we stack all that together we should have the "most efficient"
// method of calling a catch panic whilst juggling ownership.
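
The comment above names the three union fields `f`, `r`, and `p`. A self-contained sketch of that state machine, success path only, driven by hand instead of by the `try` intrinsic (illustrative; not the real `Data` type or control flow in libstd):

use std::any::Any;
use std::mem::ManuallyDrop;

#[allow(dead_code)] // `p` is only written on the panic path, not modeled here
union Data<F, R> {
    f: ManuallyDrop<F>,                   // before the call: the closure
    r: ManuallyDrop<R>,                   // after a normal return: the result
    p: ManuallyDrop<Box<dyn Any + Send>>, // after a panic: the payload
}

fn call_no_unwind<R, F: FnOnce() -> R>(f: F) -> R {
    let mut data = Data { f: ManuallyDrop::new(f) };
    unsafe {
        // `do_call`: take ownership of `f`, leaving the union logically
        // uninitialized, then overwrite it with the return value. On a
        // panic, `do_catch` would write the payload into `p` instead.
        let f = ManuallyDrop::take(&mut data.f);
        data.r = ManuallyDrop::new(f());
        // State 1 (no panic): move the result out of `data.r`.
        ManuallyDrop::take(&mut data.r)
    }
}

fn main() {
    assert_eq!(call_no_unwind(|| 2 + 2), 4);
}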
