diff --git a/src/error.rs b/src/error.rs index 72eedba5e3..370d59e5a3 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,15 +1,14 @@ use std::error::Error; use std::fmt; use rustc::mir; -use rustc::ty::{BareFnTy, Ty, FnSig, layout}; -use syntax::abi::Abi; +use rustc::ty::{FnSig, Ty, layout}; use memory::{Pointer, Function}; use rustc_const_math::ConstMathErr; use syntax::codemap::Span; #[derive(Clone, Debug)] pub enum EvalError<'tcx> { - FunctionPointerTyMismatch(Abi, &'tcx FnSig<'tcx>, &'tcx BareFnTy<'tcx>), + FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>), NoMirFor(String), UnterminatedCString(Pointer), DanglingPointerDeref, @@ -151,8 +150,8 @@ impl<'tcx> fmt::Display for EvalError<'tcx> { ptr.offset, ptr.offset + size, ptr.alloc_id, allocation_size) }, EvalError::NoMirFor(ref func) => write!(f, "no mir for `{}`", func), - EvalError::FunctionPointerTyMismatch(abi, sig, got) => - write!(f, "tried to call a function with abi {:?} and sig {:?} through a function pointer of type {:?}", abi, sig, got), + EvalError::FunctionPointerTyMismatch(sig, got) => - write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got), EvalError::ArrayIndexOutOfBounds(span, len, index) => write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span), EvalError::Math(span, ref err) => diff --git a/src/eval_context.rs b/src/eval_context.rs index 0ab6e85e38..549530265e 100644 --- a/src/eval_context.rs +++ b/src/eval_context.rs @@ -9,7 +9,7 @@ use rustc::mir; use rustc::traits::Reveal; use rustc::ty::layout::{self, Layout, Size}; use rustc::ty::subst::{self, Subst, Substs}; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Binder}; use rustc_data_structures::indexed_vec::Idx; use syntax::codemap::{self, DUMMY_SP}; @@ -181,8 +181,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { Float(ConstFloat::F32(f)) => PrimVal::from_f32(f), Float(ConstFloat::F64(f)) => PrimVal::from_f64(f), - Float(ConstFloat::FInfer { .. }) => - bug!("uninferred constants only exist before typeck"), Bool(b) => PrimVal::from_bool(b), Char(c) => PrimVal::from_char(c), @@ -196,7 +194,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { Struct(_) => unimplemented!(), Tuple(_) => unimplemented!(), - Function(_) => unimplemented!(), + Function(_, _) => unimplemented!(), Array(_) => unimplemented!(), Repeat(_, _) => unimplemented!(), }; @@ -227,6 +225,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { self.tcx.normalize_associated_type(&substituted) } + pub fn erase_lifetimes<T>(&self, value: &Binder<T>) -> T + where T : TypeFoldable<'tcx> + { + let value = self.tcx.erase_late_bound_regions(value); + self.tcx.erase_regions(&value) + } + pub(super) fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> { self.type_size_with_substs(ty, self.substs()) } @@ -457,7 +462,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { General { discr, ref variants, .. } => { if let mir::AggregateKind::Adt(adt_def, variant, _, _) = *kind { - let discr_val = adt_def.variants[variant].disr_val; + let discr_val = adt_def.discriminants(self.tcx) + .nth(variant) + .expect("broken mir: Adt variant id invalid") + .to_u128_unchecked(); let discr_size = discr.size().bytes(); if variants[variant].packed { let ptr = self.force_allocation(dest)?.to_ptr_and_extra().0; @@ -530,7 +538,10 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { CEnum { ..
} => { assert_eq!(operands.len(), 0); if let mir::AggregateKind::Adt(adt_def, variant, _, _) = *kind { - let n = adt_def.variants[variant].disr_val; + let n = adt_def.discriminants(self.tcx) + .nth(variant) + .expect("broken mir: Adt variant index invalid") + .to_u128_unchecked(); self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?; } else { bug!("tried to assign {:?} to Layout::CEnum", kind); } @@ -640,25 +651,29 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { } ReifyFnPointer => match self.operand_ty(operand).sty { - ty::TyFnDef(def_id, substs, fn_ty) => { - let fn_ty = self.tcx.erase_regions(&fn_ty); - let fn_ptr = self.memory.create_fn_ptr(self.tcx,def_id, substs, fn_ty); + ty::TyFnDef(def_id, substs, sig) => { + let fn_ptr = self.memory.create_fn_ptr(def_id, substs, sig); self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?; }, ref other => bug!("reify fn pointer on {:?}", other), }, UnsafeFnPointer => match dest_ty.sty { - ty::TyFnPtr(unsafe_fn_ty) => { + ty::TyFnPtr(_) => { let src = self.eval_operand(operand)?; - let ptr = src.read_ptr(&self.memory)?; - let fn_def = self.memory.get_fn(ptr.alloc_id)?.expect_concrete()?; - let unsafe_fn_ty = self.tcx.erase_regions(&unsafe_fn_ty); - let fn_ptr = self.memory.create_fn_ptr(self.tcx, fn_def.def_id, fn_def.substs, unsafe_fn_ty); - self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?; + self.write_value(src, dest, dest_ty)?; }, ref other => bug!("fn to unsafe fn cast on {:?}", other), }, + + ClosureFnPointer => match self.operand_ty(operand).sty { + ty::TyClosure(def_id, substs) => { + let fn_ty = self.tcx.closure_type(def_id); + let fn_ptr = self.memory.create_fn_ptr_from_noncapture_closure(def_id, substs, fn_ty); + self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?; + }, + ref other => bug!("closure fn pointer on {:?}", other), + }, } } @@ -668,7 +683,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { let ptr = self.force_allocation(lval)?.to_ptr(); let discr_val = self.read_discriminant_value(ptr, ty)?; if let ty::TyAdt(adt_def, _) = ty.sty { - if adt_def.variants.iter().all(|v| discr_val != v.disr_val) { + if adt_def.discriminants(self.tcx).all(|v| discr_val != v.to_u128_unchecked()) { return Err(EvalError::InvalidDiscriminant); } } else { diff --git a/src/lvalue.rs b/src/lvalue.rs index d4d292cd4e..afdb5b392d 100644 --- a/src/lvalue.rs +++ b/src/lvalue.rs @@ -137,9 +137,9 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { Local(mir::RETURN_POINTER) => self.frame().return_lvalue, Local(local) => Lvalue::Local { frame: self.stack.len() - 1, local, field: None }, - Static(def_id) => { + Static(ref static_) => { let substs = self.tcx.intern_substs(&[]); - Lvalue::Global(GlobalId { def_id, substs, promoted: None }) + Lvalue::Global(GlobalId { def_id: static_.def_id, substs, promoted: None }) } Projection(ref proj) => return self.eval_lvalue_projection(proj), diff --git a/src/memory.rs b/src/memory.rs index 459a9bed41..44959ea672 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -3,12 +3,10 @@ use std::collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque, BTreeSet use std::{fmt, iter, ptr, mem, io}; use rustc::hir::def_id::DefId; -use rustc::ty::{self, BareFnTy, ClosureTy, ClosureSubsts, TyCtxt}; +use rustc::ty::{self, PolyFnSig, ClosureSubsts}; use rustc::ty::subst::Substs; use rustc::ty::layout::{self, TargetDataLayout}; -use syntax::abi::Abi; - use error::{EvalError, EvalResult}; use value::PrimVal; @@ -109,8 +107,7 @@ impl Pointer { pub struct FunctionDefinition<'tcx> { pub def_id: DefId, pub substs: 
&'tcx Substs<'tcx>, - pub abi: Abi, - pub sig: &'tcx ty::FnSig<'tcx>, + pub sig: PolyFnSig<'tcx>, } /// Either a concrete function, or a glue function @@ -127,18 +124,14 @@ pub enum Function<'tcx> { DropGlue(ty::Ty<'tcx>), /// Glue required to treat the ptr part of a fat pointer /// as a function pointer - FnPtrAsTraitObject(&'tcx ty::FnSig<'tcx>), + FnPtrAsTraitObject(PolyFnSig<'tcx>), /// Glue for Closures Closure(FunctionDefinition<'tcx>), + /// Glue for noncapturing closures casted to function pointers + NonCaptureClosureAsFnPtr(FunctionDefinition<'tcx>), } impl<'tcx> Function<'tcx> { - pub fn expect_concrete(self) -> EvalResult<'tcx, FunctionDefinition<'tcx>> { - match self { - Function::Concrete(fn_def) => Ok(fn_def), - other => Err(EvalError::ExpectedConcreteFunction(other)), - } - } pub fn expect_drop_glue_real_ty(self) -> EvalResult<'tcx, ty::Ty<'tcx>> { match self { Function::DropGlue(real_ty) => Ok(real_ty), @@ -221,50 +214,43 @@ impl<'a, 'tcx> Memory<'a, 'tcx> { self.alloc_map.iter() } - pub fn create_closure_ptr(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: ClosureSubsts<'tcx>, fn_ty: ClosureTy<'tcx>) -> Pointer { - // FIXME: this is a hack - let fn_ty = tcx.mk_bare_fn(ty::BareFnTy { - unsafety: fn_ty.unsafety, - abi: fn_ty.abi, - sig: fn_ty.sig, - }); + pub fn create_closure_ptr(&mut self, def_id: DefId, substs: ClosureSubsts<'tcx>, sig: PolyFnSig<'tcx>) -> Pointer { self.create_fn_alloc(Function::Closure(FunctionDefinition { def_id, substs: substs.substs, - abi: fn_ty.abi, - // FIXME: why doesn't this compile? - //sig: tcx.erase_late_bound_regions(&fn_ty.sig), - sig: fn_ty.sig.skip_binder(), + sig, + })) + } + + pub fn create_fn_ptr_from_noncapture_closure(&mut self, def_id: DefId, substs: ClosureSubsts<'tcx>, sig: PolyFnSig<'tcx>) -> Pointer { + self.create_fn_alloc(Function::NonCaptureClosureAsFnPtr(FunctionDefinition { + def_id, + substs: substs.substs, + sig, })) } - pub fn create_fn_as_trait_glue(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, fn_ty: &'tcx BareFnTy<'tcx>) -> Pointer { + pub fn create_fn_as_trait_glue(&mut self, def_id: DefId, substs: &'tcx Substs, sig: PolyFnSig<'tcx>) -> Pointer { self.create_fn_alloc(Function::FnDefAsTraitObject(FunctionDefinition { def_id, substs, - abi: fn_ty.abi, - // FIXME: why doesn't this compile? - //sig: tcx.erase_late_bound_regions(&fn_ty.sig), - sig: fn_ty.sig.skip_binder(), + sig, })) } - pub fn create_fn_ptr_as_trait_glue(&mut self, fn_ty: &'tcx BareFnTy<'tcx>) -> Pointer { - self.create_fn_alloc(Function::FnPtrAsTraitObject(fn_ty.sig.skip_binder())) + pub fn create_fn_ptr_as_trait_glue(&mut self, sig: PolyFnSig<'tcx>) -> Pointer { + self.create_fn_alloc(Function::FnPtrAsTraitObject(sig)) } pub fn create_drop_glue(&mut self, ty: ty::Ty<'tcx>) -> Pointer { self.create_fn_alloc(Function::DropGlue(ty)) } - pub fn create_fn_ptr(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, fn_ty: &'tcx BareFnTy<'tcx>) -> Pointer { + pub fn create_fn_ptr(&mut self, def_id: DefId, substs: &'tcx Substs, sig: PolyFnSig<'tcx>) -> Pointer { self.create_fn_alloc(Function::Concrete(FunctionDefinition { def_id, substs, - abi: fn_ty.abi, - // FIXME: why doesn't this compile? 
- //sig: tcx.erase_late_bound_regions(&fn_ty.sig), - sig: fn_ty.sig.skip_binder(), + sig, })) } @@ -535,6 +521,10 @@ impl<'a, 'tcx> Memory<'a, 'tcx> { trace!("{} closure glue for {}", msg, dump_fn_def(fn_def)); continue; }, + (None, Some(&Function::NonCaptureClosureAsFnPtr(fn_def))) => { + trace!("{} non-capture closure as fn ptr glue for {}", msg, dump_fn_def(fn_def)); + continue; + }, (None, None) => { trace!("{} (deallocated)", msg); continue; @@ -606,12 +596,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> { fn dump_fn_def<'tcx>(fn_def: FunctionDefinition<'tcx>) -> String { let name = ty::tls::with(|tcx| tcx.item_path_str(fn_def.def_id)); - let abi = if fn_def.abi == Abi::Rust { - format!("") - } else { - format!("extern {} ", fn_def.abi) - }; - format!("function pointer: {}: {}{}", name, abi, fn_def.sig) + format!("function pointer: {}: {}", name, fn_def.sig.skip_binder()) } /// Byte accessors diff --git a/src/operator.rs b/src/operator.rs index 2823e3edbe..155d5574da 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -178,25 +178,9 @@ pub fn binary_op<'tcx>( // These ops can have an RHS with a different numeric type. if bin_op == Shl || bin_op == Shr { - // These are the maximum values a bitshift RHS could possibly have. For example, u16 - // can be bitshifted by 0..16, so masking with 0b1111 (16 - 1) will ensure we are in - // that range. - let type_bits: u32 = match left_kind { - I8 | U8 => 8, - I16 | U16 => 16, - I32 | U32 => 32, - I64 | U64 => 64, - I128 | U128 => 128, - _ => bug!("bad MIR: bitshift lhs is not integral"), - }; - - // Cast to `u32` because `overflowing_sh{l,r}` only take `u32`, then apply the bitmask - // to ensure it's within the valid shift value range. - let masked_shift_width = (r as u32) & (type_bits - 1); - return match bin_op { - Shl => int_shift!(left_kind, overflowing_shl, l, masked_shift_width), - Shr => int_shift!(left_kind, overflowing_shr, l, masked_shift_width), + Shl => int_shift!(left_kind, overflowing_shl, l, r as u32), + Shr => int_shift!(left_kind, overflowing_shr, l, r as u32), _ => bug!("it has already been checked that this is a shift op"), }; } diff --git a/src/step.rs b/src/step.rs index c08ac9693a..23f0142a7f 100644 --- a/src/step.rs +++ b/src/step.rs @@ -242,7 +242,8 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for ConstantExtractor<'a, 'b, 'tcx> { location: mir::Location ) { self.super_lvalue(lvalue, context, location); - if let mir::Lvalue::Static(def_id) = *lvalue { + if let mir::Lvalue::Static(ref static_) = *lvalue { + let def_id = static_.def_id; let substs = self.ecx.tcx.intern_substs(&[]); let span = self.span; if let Some(node_item) = self.ecx.tcx.hir.get_if_local(def_id) { diff --git a/src/terminator/drop.rs b/src/terminator/drop.rs index 289bae89c3..efa4156793 100644 --- a/src/terminator/drop.rs +++ b/src/terminator/drop.rs @@ -97,7 +97,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { let (adt_ptr, extra) = lval.to_ptr_and_extra(); // run drop impl before the fields' drop impls - if let Some(drop_def_id) = adt_def.destructor() { + if let Some(destructor) = adt_def.destructor(self.tcx) { let trait_ref = ty::Binder(ty::TraitRef { def_id: self.tcx.lang_items.drop_trait().unwrap(), substs: self.tcx.mk_substs_trait(ty, &[]), @@ -112,7 +112,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { LvalueExtra::Length(n) => Value::ByValPair(PrimVal::Ptr(adt_ptr), PrimVal::from_u128(n as u128)), LvalueExtra::Vtable(vtable) => Value::ByValPair(PrimVal::Ptr(adt_ptr), PrimVal::Ptr(vtable)), }; - drop.push((drop_def_id, val, vtable.substs)); + drop.push((destructor.did, val, 
vtable.substs)); } let layout = self.type_layout(ty)?; @@ -121,7 +121,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { Layout::General { .. } => { let discr_val = self.read_discriminant_value(adt_ptr, ty)? as u128; let ptr = self.force_allocation(lval)?.to_ptr(); - match adt_def.variants.iter().position(|v| discr_val == v.disr_val) { + match adt_def.discriminants(self.tcx).position(|v| discr_val == v.to_u128_unchecked()) { Some(i) => { lval = Lvalue::Ptr { ptr, diff --git a/src/terminator/intrinsic.rs b/src/terminator/intrinsic.rs index ab8539be31..2776857a88 100644 --- a/src/terminator/intrinsic.rs +++ b/src/terminator/intrinsic.rs @@ -78,7 +78,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { // we are inherently singlethreaded and singlecored, this is a nop } - "atomic_xchg" => { + _ if intrinsic_name.starts_with("atomic_xchg") => { let ty = substs.type_at(0); let ptr = arg_vals[0].read_ptr(&self.memory)?; let change = self.value_to_primval(arg_vals[1], ty)?; @@ -92,8 +92,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { self.write_primval(Lvalue::from_ptr(ptr), change, ty)?; } - "atomic_cxchg_relaxed" | - "atomic_cxchg" => { + _ if intrinsic_name.starts_with("atomic_cxchg") => { let ty = substs.type_at(0); let ptr = arg_vals[0].read_ptr(&self.memory)?; let expect_old = self.value_to_primval(arg_vals[1], ty)?; @@ -111,8 +110,11 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { self.write_primval(Lvalue::from_ptr(ptr), change, ty)?; } - "atomic_xadd" | - "atomic_xadd_relaxed" => { + "atomic_or" | "atomic_or_acq" | "atomic_or_rel" | "atomic_or_acqrel" | "atomic_or_relaxed" | + "atomic_xor" | "atomic_xor_acq" | "atomic_xor_rel" | "atomic_xor_acqrel" | "atomic_xor_relaxed" | + "atomic_and" | "atomic_and_acq" | "atomic_and_rel" | "atomic_and_acqrel" | "atomic_and_relaxed" | + "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" | + "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => { let ty = substs.type_at(0); let ptr = arg_vals[0].read_ptr(&self.memory)?; let change = self.value_to_primval(arg_vals[1], ty)?; @@ -124,27 +126,18 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { }; self.write_primval(dest, old, ty)?; let kind = self.ty_to_primval_kind(ty)?; - // FIXME: what do atomics do on overflow? - let (val, _) = operator::binary_op(mir::BinOp::Add, old, kind, change, kind)?; - self.write_primval(Lvalue::from_ptr(ptr), val, ty)?; - }, - - "atomic_xsub_rel" => { - let ty = substs.type_at(0); - let ptr = arg_vals[0].read_ptr(&self.memory)?; - let change = self.value_to_primval(arg_vals[1], ty)?; - let old = self.read_value(ptr, ty)?; - let old = match old { - Value::ByVal(val) => val, - Value::ByRef(_) => bug!("just read the value, can't be byref"), - Value::ByValPair(..) => bug!("atomic_xsub_rel doesn't work with nonprimitives"), + let op = match intrinsic_name.split('_').nth(1).unwrap() { + "or" => mir::BinOp::BitOr, + "xor" => mir::BinOp::BitXor, + "and" => mir::BinOp::BitAnd, + "xadd" => mir::BinOp::Add, + "xsub" => mir::BinOp::Sub, + _ => bug!(), }; - self.write_primval(dest, old, ty)?; - let kind = self.ty_to_primval_kind(ty)?; // FIXME: what do atomics do on overflow? 
- let (val, _) = operator::binary_op(mir::BinOp::Sub, old, kind, change, kind)?; + let (val, _) = operator::binary_op(op, old, kind, change, kind)?; self.write_primval(Lvalue::from_ptr(ptr), val, ty)?; - } + }, "breakpoint" => unimplemented!(), // halt miri @@ -207,14 +200,50 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { return self.eval_drop_impls(drops, span); } - "fabsf32" => { + "sinf32" | "fabsf32" | "cosf32" | + "sqrtf32" | "expf32" | "exp2f32" | + "logf32" | "log10f32" | "log2f32" | + "floorf32" | "ceilf32" | "truncf32" => { let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; - self.write_primval(dest, PrimVal::from_f32(f.abs()), dest_ty)?; + let f = match intrinsic_name { + "sinf32" => f.sin(), + "fabsf32" => f.abs(), + "cosf32" => f.cos(), + "sqrtf32" => f.sqrt(), + "expf32" => f.exp(), + "exp2f32" => f.exp2(), + "logf32" => f.ln(), + "log10f32" => f.log10(), + "log2f32" => f.log2(), + "floorf32" => f.floor(), + "ceilf32" => f.ceil(), + "truncf32" => f.trunc(), + _ => bug!(), + }; + self.write_primval(dest, PrimVal::from_f32(f), dest_ty)?; } - "fabsf64" => { + "sinf64" | "fabsf64" | "cosf64" | + "sqrtf64" | "expf64" | "exp2f64" | + "logf64" | "log10f64" | "log2f64" | + "floorf64" | "ceilf64" | "truncf64" => { let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; - self.write_primval(dest, PrimVal::from_f64(f.abs()), dest_ty)?; + let f = match intrinsic_name { + "sinf64" => f.sin(), + "fabsf64" => f.abs(), + "cosf64" => f.cos(), + "sqrtf64" => f.sqrt(), + "expf64" => f.exp(), + "exp2f64" => f.exp2(), + "logf64" => f.ln(), + "log10f64" => f.log10(), + "log2f64" => f.log2(), + "floorf64" => f.floor(), + "ceilf64" => f.ceil(), + "truncf64" => f.trunc(), + _ => bug!(), + }; + self.write_primval(dest, PrimVal::from_f64(f), dest_ty)?; } "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { @@ -320,26 +349,42 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?; } - "powif32" => { + "powf32" => { let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; - let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; - self.write_primval(dest, PrimVal::from_f32(f.powi(i as i32)), dest_ty)?; + let f2 = self.value_to_primval(arg_vals[1], f32)?.to_f32()?; + self.write_primval(dest, PrimVal::from_f32(f.powf(f2)), dest_ty)?; } - "powif64" => { + "powf64" => { let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; - let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; - self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)), dest_ty)?; + let f2 = self.value_to_primval(arg_vals[1], f64)?.to_f64()?; + self.write_primval(dest, PrimVal::from_f64(f.powf(f2)), dest_ty)?; + } + + "fmaf32" => { + let a = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; + let b = self.value_to_primval(arg_vals[1], f32)?.to_f32()?; + let c = self.value_to_primval(arg_vals[2], f32)?.to_f32()?; + self.write_primval(dest, PrimVal::from_f32(a * b + c), dest_ty)?; + } + + "fmaf64" => { + let a = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; + let b = self.value_to_primval(arg_vals[1], f64)?.to_f64()?; + let c = self.value_to_primval(arg_vals[2], f64)?.to_f64()?; + self.write_primval(dest, PrimVal::from_f64(a * b + c), dest_ty)?; } - "sqrtf32" => { + "powif32" => { let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?; - self.write_primval(dest, PrimVal::from_f32(f.sqrt()), dest_ty)?; + let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; + self.write_primval(dest, 
PrimVal::from_f32(f.powi(i as i32)), dest_ty)?; } - "sqrtf64" => { + "powif64" => { let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?; - self.write_primval(dest, PrimVal::from_f64(f.sqrt()), dest_ty)?; + let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?; + self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)), dest_ty)?; } "size_of" => { diff --git a/src/terminator/mod.rs b/src/terminator/mod.rs index 183a3f54fb..7968272311 100644 --- a/src/terminator/mod.rs +++ b/src/terminator/mod.rs @@ -2,14 +2,16 @@ use rustc::hir::def_id::DefId; use rustc::mir; use rustc::ty::layout::Layout; use rustc::ty::subst::Substs; -use rustc::ty::{self, Ty, BareFnTy}; +use rustc::ty::{self, Ty}; +use rustc_const_math::ConstInt; use syntax::codemap::Span; use syntax::attr; +use syntax::abi::Abi; use error::{EvalError, EvalResult}; use eval_context::{EvalContext, IntegerExt, StackPopCleanup, is_inhabited}; use lvalue::Lvalue; -use memory::{Pointer, FunctionDefinition}; +use memory::{Pointer, FunctionDefinition, Function}; use value::PrimVal; use value::Value; @@ -61,35 +63,53 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { }; let func_ty = self.operand_ty(func); - match func_ty.sty { - ty::TyFnPtr(bare_fn_ty) => { + let fn_def = match func_ty.sty { + ty::TyFnPtr(bare_sig) => { + let bare_sig = self.erase_lifetimes(&bare_sig); let fn_ptr = self.eval_operand_to_primval(func)?.to_ptr()?; - let FunctionDefinition {def_id, substs, abi, sig} = self.memory.get_fn(fn_ptr.alloc_id)?.expect_concrete()?; - let bare_sig = self.tcx.erase_late_bound_regions_and_normalize(&bare_fn_ty.sig); - let bare_sig = self.tcx.erase_regions(&bare_sig); - // transmuting function pointers in miri is fine as long as the number of - // arguments and the abi don't change. - // FIXME: also check the size of the arguments' type and the return type - // Didn't get it to work, since that triggers an assertion in rustc which - // checks whether the type has escaping regions - if abi != bare_fn_ty.abi || - sig.variadic != bare_sig.variadic || - sig.inputs().len() != bare_sig.inputs().len() { - return Err(EvalError::FunctionPointerTyMismatch(abi, sig, bare_fn_ty)); + let fn_def = self.memory.get_fn(fn_ptr.alloc_id)?; + match fn_def { + Function::Concrete(fn_def) => { + // transmuting function pointers in miri is fine as long as the number of + // arguments and the abi don't change. + let sig = self.erase_lifetimes(&fn_def.sig); + if sig.abi != bare_sig.abi || + sig.variadic != bare_sig.variadic || + sig.inputs_and_output != bare_sig.inputs_and_output { + return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig)); + } + }, + Function::NonCaptureClosureAsFnPtr(fn_def) => { + let sig = self.erase_lifetimes(&fn_def.sig); + assert_eq!(sig.abi, Abi::RustCall); + if sig.variadic != bare_sig.variadic || + sig.inputs().len() != 1 { + return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig)); + } + if let ty::TyTuple(fields, _) = sig.inputs()[0].sty { + if **fields != *bare_sig.inputs() { + return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig)); + } + } else { + return Err(EvalError::FunctionPointerTyMismatch(sig, bare_sig)); + } + }, + other => return Err(EvalError::ExpectedConcreteFunction(other)), } - self.eval_fn_call(def_id, substs, bare_fn_ty, destination, args, - terminator.source_info.span)? + self.memory.get_fn(fn_ptr.alloc_id)? }, - ty::TyFnDef(def_id, substs, fn_ty) => { - self.eval_fn_call(def_id, substs, fn_ty, destination, args, - terminator.source_info.span)? 
- } + ty::TyFnDef(def_id, substs, fn_ty) => Function::Concrete(FunctionDefinition { + def_id, + substs, + sig: fn_ty, + }), _ => { let msg = format!("can't handle callee of type {:?}", func_ty); return Err(EvalError::Unimplemented(msg)); } - } + }; + self.eval_fn_call(fn_def, destination, args, terminator.source_info.span)?; } Drop { ref location, target, .. } => { @@ -138,17 +158,17 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { fn eval_fn_call( &mut self, - def_id: DefId, - substs: &'tcx Substs<'tcx>, - fn_ty: &'tcx BareFnTy, + fn_def: Function<'tcx>, destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>, arg_operands: &[mir::Operand<'tcx>], span: Span, ) -> EvalResult<'tcx> { use syntax::abi::Abi; - match fn_ty.abi { - Abi::RustIntrinsic => { - let ty = fn_ty.sig.0.output(); + match fn_def { + // Intrinsics can only be addressed directly + Function::Concrete(FunctionDefinition { def_id, substs, sig }) if sig.abi() == Abi::RustIntrinsic => { + let sig = self.erase_lifetimes(&sig); + let ty = sig.output(); let layout = self.type_layout(ty)?; let (ret, target) = match destination { Some(dest) if is_inhabited(self.tcx, ty) => dest, @@ -157,18 +177,19 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { self.call_intrinsic(def_id, substs, arg_operands, ret, ty, layout, target)?; self.dump_local(ret); Ok(()) - } - - Abi::C => { - let ty = fn_ty.sig.0.output(); + }, + // C functions can only be addressed directly + Function::Concrete(FunctionDefinition { def_id, sig, ..}) if sig.abi() == Abi::C => { + let sig = self.erase_lifetimes(&sig); + let ty = sig.output(); let (ret, target) = destination.unwrap(); self.call_c_abi(def_id, arg_operands, ret, ty)?; self.dump_local(ret); self.goto_block(target); Ok(()) - } - - Abi::Rust | Abi::RustCall => { + }, + Function::DropGlue(_) => Err(EvalError::ManuallyCalledDropGlue), + Function::Concrete(FunctionDefinition { def_id, sig, substs }) if sig.abi() == Abi::Rust || sig.abi() == Abi::RustCall => { let mut args = Vec::new(); for arg in arg_operands { let arg_val = self.eval_operand(arg)?; @@ -185,20 +206,20 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { }; // FIXME(eddyb) Detect ADT constructors more efficiently. - if let Some(adt_def) = fn_ty.sig.skip_binder().output().ty_adt_def() { - if let Some(v) = adt_def.variants.iter().find(|v| resolved_def_id == v.did) { + if let Some(adt_def) = sig.output().skip_binder().ty_adt_def() { + let dids = adt_def.variants.iter().map(|v| v.did); + let discrs = adt_def.discriminants(self.tcx).map(ConstInt::to_u128_unchecked); + if let Some((_, disr_val)) = dids.zip(discrs).find(|&(did, _)| resolved_def_id == did) { let (lvalue, target) = destination.expect("tuple struct constructors can't diverge"); let dest_ty = self.tcx.item_type(adt_def.did); let dest_layout = self.type_layout(dest_ty)?; trace!("layout({:?}) = {:#?}", dest_ty, dest_layout); match *dest_layout { Layout::Univariant { .. } => { - let disr_val = v.disr_val; assert_eq!(disr_val, 0); self.assign_fields(lvalue, dest_ty, args)?; }, Layout::General { discr, ref variants, .. } => { - let disr_val = v.disr_val; let discr_size = discr.size().bytes(); self.assign_discr_and_fields( lvalue, @@ -211,7 +232,6 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { )?; }, Layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. 
} => { - let disr_val = v.disr_val; if nndiscr as u128 == disr_val { self.assign_fields(lvalue, dest_ty, args)?; } else { @@ -240,66 +260,107 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { return Ok(()); } } - - let mir = match self.load_mir(resolved_def_id) { - Ok(mir) => mir, - Err(EvalError::NoMirFor(path)) => { - match &path[..] { - // let's just ignore all output for now - "std::io::_print" => { - self.goto_block(destination.unwrap().1); - return Ok(()); - }, - "std::thread::Builder::new" => return Err(EvalError::Unimplemented("miri does not support threading".to_owned())), - "std::env::args" => return Err(EvalError::Unimplemented("miri does not support program arguments".to_owned())), - "std::panicking::rust_panic_with_hook" | - "std::rt::begin_panic_fmt" => return Err(EvalError::Panic), - "std::panicking::panicking" | - "std::rt::panicking" => { - let (lval, block) = destination.expect("std::rt::panicking does not diverge"); - // we abort on panic -> `std::rt::panicking` always returns false - let bool = self.tcx.types.bool; - self.write_primval(lval, PrimVal::from_bool(false), bool)?; - self.goto_block(block); - return Ok(()); - } - _ => {}, - } - return Err(EvalError::NoMirFor(path)); - }, - Err(other) => return Err(other), - }; - let (return_lvalue, return_to_block) = match destination { - Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)), - None => { - // FIXME(solson) - let lvalue = Lvalue::from_ptr(Pointer::never_ptr()); - (lvalue, StackPopCleanup::None) - } - }; - - self.push_stack_frame( + self.eval_fn_call_inner( resolved_def_id, - span, - mir, resolved_substs, - return_lvalue, - return_to_block, + destination, + args, temporaries, - )?; - - let arg_locals = self.frame().mir.args_iter(); - assert_eq!(self.frame().mir.arg_count, args.len()); - for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { - let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; - self.write_value(arg_val, dest, arg_ty)?; + span, + ) + }, + Function::NonCaptureClosureAsFnPtr(FunctionDefinition { def_id, substs, sig }) if sig.abi() == Abi::RustCall => { + let sig = self.erase_lifetimes(&sig); + let mut args = Vec::new(); + for arg in arg_operands { + let arg_val = self.eval_operand(arg)?; + let arg_ty = self.operand_ty(arg); + args.push((arg_val, arg_ty)); } + args.insert(0, ( + Value::ByVal(PrimVal::Undef), + sig.inputs()[0], + )); + self.eval_fn_call_inner( + def_id, + substs, + destination, + args, + Vec::new(), + span, + ) + } + Function::Concrete(fn_def) => Err(EvalError::Unimplemented(format!("can't handle function with {:?} ABI", fn_def.sig.abi()))), + other => Err(EvalError::Unimplemented(format!("can't call function kind {:#?}", other))), + } + } - Ok(()) + fn eval_fn_call_inner( + &mut self, + resolved_def_id: DefId, + resolved_substs: &'tcx Substs, + destination: Option<(Lvalue<'tcx>, mir::BasicBlock)>, + args: Vec<(Value, Ty<'tcx>)>, + temporaries: Vec<(Pointer, Ty<'tcx>)>, + span: Span, + ) -> EvalResult<'tcx> { + trace!("eval_fn_call_inner: {:#?}, {:#?}, {:#?}", args, temporaries, destination); + + let mir = match self.load_mir(resolved_def_id) { + Ok(mir) => mir, + Err(EvalError::NoMirFor(path)) => { + match &path[..] 
{ + // let's just ignore all output for now + "std::io::_print" => { + self.goto_block(destination.unwrap().1); + return Ok(()); + }, + "std::thread::Builder::new" => return Err(EvalError::Unimplemented("miri does not support threading".to_owned())), + "std::env::args" => return Err(EvalError::Unimplemented("miri does not support program arguments".to_owned())), + "std::panicking::rust_panic_with_hook" | + "std::rt::begin_panic_fmt" => return Err(EvalError::Panic), + "std::panicking::panicking" | + "std::rt::panicking" => { + let (lval, block) = destination.expect("std::rt::panicking does not diverge"); + // we abort on panic -> `std::rt::panicking` always returns false + let bool = self.tcx.types.bool; + self.write_primval(lval, PrimVal::from_bool(false), bool)?; + self.goto_block(block); + return Ok(()); + } + _ => {}, + } + return Err(EvalError::NoMirFor(path)); + }, + Err(other) => return Err(other), + }; + let (return_lvalue, return_to_block) = match destination { + Some((lvalue, block)) => (lvalue, StackPopCleanup::Goto(block)), + None => { + // FIXME(solson) + let lvalue = Lvalue::from_ptr(Pointer::never_ptr()); + (lvalue, StackPopCleanup::None) } + }; - abi => Err(EvalError::Unimplemented(format!("can't handle function with {:?} ABI", abi))), + self.push_stack_frame( + resolved_def_id, + span, + mir, + resolved_substs, + return_lvalue, + return_to_block, + temporaries, + )?; + + let arg_locals = self.frame().mir.args_iter(); + assert_eq!(self.frame().mir.arg_count, args.len()); + for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) { + let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?; + self.write_value(arg_val, dest, arg_ty)?; } + + Ok(()) } pub fn read_discriminant_value(&self, adt_ptr: Pointer, adt_ty: Ty<'tcx>) -> EvalResult<'tcx, u128> { diff --git a/src/traits.rs b/src/traits.rs index 733095322d..72de17801d 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -113,7 +113,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { match self.memory.get_fn(fn_ptr.alloc_id)? { Function::FnDefAsTraitObject(fn_def) => { trace!("sig: {:#?}", fn_def.sig); - assert!(fn_def.abi != abi::Abi::RustCall); + assert!(fn_def.sig.abi() != abi::Abi::RustCall); assert_eq!(args.len(), 2); // a function item turned into a closure trait object // the first arg is just there to give use the vtable @@ -123,25 +123,49 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { }, Function::DropGlue(_) => Err(EvalError::ManuallyCalledDropGlue), Function::Concrete(fn_def) => { - trace!("sig: {:#?}", fn_def.sig); + let sig = self.erase_lifetimes(&fn_def.sig); + trace!("sig: {:#?}", sig); args[0] = ( Value::ByVal(PrimVal::Ptr(self_ptr)), - fn_def.sig.inputs()[0], + sig.inputs()[0], ); Ok((fn_def.def_id, fn_def.substs, Vec::new())) }, + Function::NonCaptureClosureAsFnPtr(fn_def) => { + let sig = self.erase_lifetimes(&fn_def.sig); + args.insert(0, ( + Value::ByVal(PrimVal::Undef), + sig.inputs()[0], + )); + Ok((fn_def.def_id, fn_def.substs, Vec::new())) + } Function::Closure(fn_def) => { self.unpack_fn_args(args)?; Ok((fn_def.def_id, fn_def.substs, Vec::new())) } Function::FnPtrAsTraitObject(sig) => { + let sig = self.erase_lifetimes(&sig); trace!("sig: {:#?}", sig); // the first argument was the fat ptr args.remove(0); self.unpack_fn_args(args)?; let fn_ptr = self.memory.read_ptr(self_ptr)?; - let fn_def = self.memory.get_fn(fn_ptr.alloc_id)?.expect_concrete()?; - assert_eq!(sig, fn_def.sig); + let fn_def = match self.memory.get_fn(fn_ptr.alloc_id)? 
{ + Function::Concrete(fn_def) => { + let fn_def_sig = self.erase_lifetimes(&fn_def.sig); + assert_eq!(sig, fn_def_sig); + fn_def + }, + Function::NonCaptureClosureAsFnPtr(fn_def) => { + let fn_def_sig = self.erase_lifetimes(&fn_def.sig); + args.insert(0, ( + Value::ByVal(PrimVal::Undef), + fn_def_sig.inputs()[0], + )); + fn_def + }, + other => bug!("FnPtrAsTraitObject for {:?}", other), + }; Ok((fn_def.def_id, fn_def.substs, Vec::new())) } } @@ -201,7 +225,7 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { _ => bug!("bad function type: {}", fn_ty), }; let fn_ty = self.tcx.erase_regions(&fn_ty); - self.memory.create_fn_ptr(self.tcx, mth.method.def_id, mth.substs, fn_ty) + self.memory.create_fn_ptr(mth.method.def_id, mth.substs, fn_ty) })) .collect::<Vec<_>>() .into_iter() @@ -214,15 +238,15 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { .. } ) => { - let closure_type = self.tcx.closure_type(closure_def_id, substs); - vec![Some(self.memory.create_closure_ptr(self.tcx, closure_def_id, substs, closure_type))].into_iter() + let closure_type = self.tcx.closure_type(closure_def_id); + vec![Some(self.memory.create_closure_ptr(closure_def_id, substs, closure_type))].into_iter() } // turn a function definition into a Fn trait object traits::VtableFnPointer(traits::VtableFnPointerData { fn_ty, .. }) => { match fn_ty.sty { ty::TyFnDef(did, substs, bare_fn_ty) => { - vec![Some(self.memory.create_fn_as_trait_glue(self.tcx, did, substs, bare_fn_ty))].into_iter() + vec![Some(self.memory.create_fn_as_trait_glue(did, substs, bare_fn_ty))].into_iter() }, ty::TyFnPtr(bare_fn_ty) => { vec![Some(self.memory.create_fn_ptr_as_trait_glue(bare_fn_ty))].into_iter() @@ -256,13 +280,14 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> { // in case there is no drop function to be called, this still needs to be initialized self.memory.write_usize(vtable, 0)?; if let ty::TyAdt(adt_def, substs) = trait_ref.self_ty().sty { - if let Some(drop_def_id) = adt_def.destructor() { - let fn_ty = match self.tcx.item_type(drop_def_id).sty { + if let Some(destructor) = adt_def.destructor(self.tcx) { + let fn_ty = match self.tcx.item_type(destructor.did).sty { ty::TyFnDef(_, _, fn_ty) => self.tcx.erase_regions(&fn_ty), _ => bug!("drop method is not a TyFnDef"), }; + let fn_ty = self.erase_lifetimes(&fn_ty); // The real type is taken from the self argument in `fn drop(&mut self)` - let real_ty = match fn_ty.sig.skip_binder().inputs()[0].sty { + let real_ty = match fn_ty.inputs()[0].sty { ty::TyRef(_, mt) => self.monomorphize(mt.ty, substs), _ => bug!("first argument of Drop::drop must be &mut T"), }; diff --git a/tests/compile-fail/cast_fn_ptr.rs b/tests/compile-fail/cast_fn_ptr.rs index c8070913f1..7509ae6ed7 100644 --- a/tests/compile-fail/cast_fn_ptr.rs +++ b/tests/compile-fail/cast_fn_ptr.rs @@ -5,5 +5,5 @@ fn main() { std::mem::transmute::<fn(), fn(i32)>(f) }; - g(42) //~ ERROR tried to call a function with abi Rust and sig + g(42) //~ ERROR tried to call a function with sig fn() through a function pointer of type fn(i32) } diff --git a/tests/compile-fail/overflowing-rsh-6.rs b/tests/compile-fail/overflowing-rsh-6.rs new file mode 100644 index 0000000000..a7ac9d1d50 --- /dev/null +++ b/tests/compile-fail/overflowing-rsh-6.rs @@ -0,0 +1,15 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(exceeding_bitshifts)] + +fn main() { + let _n = 1i64 >> 64; //~ Overflow(Shr) +} diff --git a/tests/run-pass/atomic-access-bool.rs b/tests/run-pass/atomic-access-bool.rs new file mode 100644 index 0000000000..ada5847054 --- /dev/null +++ b/tests/run-pass/atomic-access-bool.rs @@ -0,0 +1,30 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT}; +use std::sync::atomic::Ordering::*; + +static mut ATOMIC: AtomicBool = ATOMIC_BOOL_INIT; + +fn main() { + unsafe { + assert_eq!(*ATOMIC.get_mut(), false); + ATOMIC.store(true, SeqCst); + assert_eq!(*ATOMIC.get_mut(), true); + ATOMIC.fetch_or(false, SeqCst); + assert_eq!(*ATOMIC.get_mut(), true); + ATOMIC.fetch_and(false, SeqCst); + assert_eq!(*ATOMIC.get_mut(), false); + ATOMIC.fetch_nand(true, SeqCst); + assert_eq!(*ATOMIC.get_mut(), true); + ATOMIC.fetch_xor(true, SeqCst); + assert_eq!(*ATOMIC.get_mut(), false); + } +} diff --git a/tests/run-pass/atomic-compare_exchange.rs b/tests/run-pass/atomic-compare_exchange.rs new file mode 100644 index 0000000000..61e9a96588 --- /dev/null +++ b/tests/run-pass/atomic-compare_exchange.rs @@ -0,0 +1,36 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT}; +use std::sync::atomic::Ordering::*; + +static ATOMIC: AtomicIsize = ATOMIC_ISIZE_INIT; + +fn main() { + // Make sure trans can emit all the intrinsics correctly + ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok(); + ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok(); + ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok(); + ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok(); + ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok(); + ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok(); + ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok(); + ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire).ok(); + ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok(); + ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok(); +} diff --git a/tests/run-pass/intrinsics-math.rs b/tests/run-pass/intrinsics-math.rs new file mode 100644 index 0000000000..a2c5563474 --- /dev/null +++ b/tests/run-pass/intrinsics-math.rs @@ -0,0 +1,67 @@ +// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +macro_rules! assert_approx_eq { + ($a:expr, $b:expr) => ({ + let (a, b) = (&$a, &$b); + assert!((*a - *b).abs() < 1.0e-6, + "{} is not approximately equal to {}", *a, *b); + }) +} + +pub fn main() { + use std::f32; + use std::f64; + + assert_approx_eq!(64f32.sqrt(), 8f32); + assert_approx_eq!(64f64.sqrt(), 8f64); + + assert_approx_eq!(25f32.powi(-2), 0.0016f32); + assert_approx_eq!(23.2f64.powi(2), 538.24f64); + + assert_approx_eq!(0f32.sin(), 0f32); + assert_approx_eq!((f64::consts::PI / 2f64).sin(), 1f64); + + assert_approx_eq!(0f32.cos(), 1f32); + assert_approx_eq!((f64::consts::PI * 2f64).cos(), 1f64); + + assert_approx_eq!(25f32.powf(-2f32), 0.0016f32); + assert_approx_eq!(400f64.powf(0.5f64), 20f64); + + assert_approx_eq!((1f32.exp() - f32::consts::E).abs(), 0f32); + assert_approx_eq!(1f64.exp(), f64::consts::E); + + assert_approx_eq!(10f32.exp2(), 1024f32); + assert_approx_eq!(50f64.exp2(), 1125899906842624f64); + + assert_approx_eq!((f32::consts::E.ln() - 1f32).abs(), 0f32); + assert_approx_eq!(1f64.ln(), 0f64); + + assert_approx_eq!(10f32.log10(), 1f32); + assert_approx_eq!(f64::consts::E.log10(), f64::consts::LOG10_E); + + assert_approx_eq!(8f32.log2(), 3f32); + assert_approx_eq!(f64::consts::E.log2(), f64::consts::LOG2_E); + + assert_approx_eq!(1.0f32.mul_add(2.0f32, 5.0f32), 7.0f32); + assert_approx_eq!(0.0f64.mul_add(-2.0f64, f64::consts::E), f64::consts::E); + + assert_approx_eq!((-1.0f32).abs(), 1.0f32); + assert_approx_eq!(34.2f64.abs(), 34.2f64); + + assert_approx_eq!(3.8f32.floor(), 3.0f32); + assert_approx_eq!((-1.1f64).floor(), -2.0f64); + + assert_approx_eq!((-2.3f32).ceil(), -2.0f32); + assert_approx_eq!(3.8f64.ceil(), 4.0f64); + + assert_approx_eq!(0.1f32.trunc(), 0.0f32); + assert_approx_eq!((-0.1f64).trunc(), 0.0f64); +} diff --git a/tests/run-pass/non_capture_closure_to_fn_ptr.rs b/tests/run-pass/non_capture_closure_to_fn_ptr.rs new file mode 100644 index 0000000000..6f73a3d09d --- /dev/null +++ b/tests/run-pass/non_capture_closure_to_fn_ptr.rs @@ -0,0 +1,16 @@ +#![feature(closure_to_fn_coercion)] + +// allow(const_err) to work around a bug in warnings +#[allow(const_err)] +static FOO: fn() = || { assert_ne!(42, 43) }; +#[allow(const_err)] +static BAR: fn(i32, i32) = |a, b| { assert_ne!(a, b) }; + +fn main() { + FOO(); + BAR(44, 45); + let bar: unsafe fn(i32, i32) = BAR; + unsafe { bar(46, 47) }; + let boo: &Fn(i32, i32) = &BAR; + boo(48, 49); +} diff --git a/tests/run-pass/recursive_static.rs b/tests/run-pass/recursive_static.rs index 5b27324964..77f2902917 100644 --- a/tests/run-pass/recursive_static.rs +++ b/tests/run-pass/recursive_static.rs @@ -1,5 +1,3 @@ -#![feature(static_recursion)] - struct S(&'static S); static S1: S = S(&S2); static S2: S = S(&S1); diff --git a/tests/run-pass/rfc1623.rs b/tests/run-pass/rfc1623.rs new file mode 100644 index 0000000000..0ee523a5be --- /dev/null +++ b/tests/run-pass/rfc1623.rs @@ -0,0 +1,81 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] + +// very simple test for a 'static static with default lifetime +static STATIC_STR: &str = "&'static str"; +const CONST_STR: &str = "&'static str"; + +// this should be the same as without default: +static EXPLICIT_STATIC_STR: &'static str = "&'static str"; +const EXPLICIT_CONST_STR: &'static str = "&'static str"; + +// a function that elides to an unbound lifetime for both in- and output +fn id_u8_slice(arg: &[u8]) -> &[u8] { + arg +} + +// one with a function, argument elided +static STATIC_SIMPLE_FN: &fn(&[u8]) -> &[u8] = &(id_u8_slice as fn(&[u8]) -> &[u8]); +const CONST_SIMPLE_FN: &fn(&[u8]) -> &[u8] = &(id_u8_slice as fn(&[u8]) -> &[u8]); + +// this should be the same as without elision +static STATIC_NON_ELIDED_FN: &for<'a> fn(&'a [u8]) -> &'a [u8] = + &(id_u8_slice as for<'a> fn(&'a [u8]) -> &'a [u8]); +const CONST_NON_ELIDED_FN: &for<'a> fn(&'a [u8]) -> &'a [u8] = + &(id_u8_slice as for<'a> fn(&'a [u8]) -> &'a [u8]); + +// another function that elides, each to a different unbound lifetime +fn multi_args(_a: &u8, _b: &u8, _c: &u8) {} + +static STATIC_MULTI_FN: &fn(&u8, &u8, &u8) = &(multi_args as fn(&u8, &u8, &u8)); +const CONST_MULTI_FN: &fn(&u8, &u8, &u8) = &(multi_args as fn(&u8, &u8, &u8)); + +struct Foo<'a> { + bools: &'a [bool], +} + +static STATIC_FOO: Foo = Foo { bools: &[true, false] }; +const CONST_FOO: Foo = Foo { bools: &[true, false] }; + +type Bar<'a> = Foo<'a>; + +static STATIC_BAR: Bar = Bar { bools: &[true, false] }; +const CONST_BAR: Bar = Bar { bools: &[true, false] }; + +type Baz<'a> = fn(&'a [u8]) -> Option<u8>; + +fn baz(e: &[u8]) -> Option<u8> { + e.first().map(|x| *x) +} + +static STATIC_BAZ: &Baz = &(baz as Baz); +const CONST_BAZ: &Baz = &(baz as Baz); + +static BYTES: &[u8] = &[1, 2, 3]; + +fn main() { + // make sure that the lifetime is actually elided (and not defaulted) + let x = &[1u8, 2, 3]; + STATIC_SIMPLE_FN(x); + CONST_SIMPLE_FN(x); + + STATIC_BAZ(BYTES); // needs static lifetime + CONST_BAZ(BYTES); + + // make sure this works with different lifetimes + let a = &1; + { + let b = &2; + let c = &3; + CONST_MULTI_FN(a, b, c); + } +}
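Note on the new fmaf32/fmaf64 intrinsics: the patch evaluates them as `a * b + c`, i.e. with two separate roundings, rather than as a true fused multiply-add with a single rounding. The snippet below is only an illustration of that difference (it is not part of the patch, and it assumes the host's `f64::mul_add` lowers to a real FMA): run natively, the two lines print different values, while under the `a * b + c` approximation both would print `0e0`.

fn main() {
    // 0.1f64 is slightly larger than 1/10, so the exact product a * b is 1 + 2^-54.
    let (a, b, c) = (0.1f64, 10.0f64, -1.0f64);
    // Single rounding: the 2^-54 excess survives adding c = -1.0.
    println!("fused:   {:e}", a.mul_add(b, c)); // 5.551115123125783e-17 natively
    // Two roundings: a * b is first rounded to exactly 1.0, so the sum collapses to zero.
    println!("unfused: {:e}", a * b + c);       // 0e0
}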