diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index 44eae706ea8f6..2144e7ed67acb 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -92,9 +92,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
         match self.mode {
             PassMode::Ignore => smallvec![],
-            PassMode::Direct(attrs) => match &self.layout.abi {
+            PassMode::Direct(attrs) => match self.layout.abi {
                 Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
-                    AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
+                    AbiParam::new(scalar_to_clif_type(tcx, scalar)),
                     attrs
                 )],
                 Abi::Vector { .. } => {
@@ -103,10 +103,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
             },
-            PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi {
+            PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
                 Abi::ScalarPair(a, b) => {
-                    let a = scalar_to_clif_type(tcx, a.clone());
-                    let b = scalar_to_clif_type(tcx, b.clone());
+                    let a = scalar_to_clif_type(tcx, a);
+                    let b = scalar_to_clif_type(tcx, b);
                     smallvec![
                         apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
                         apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
@@ -139,9 +139,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
         match self.mode {
             PassMode::Ignore => (None, vec![]),
-            PassMode::Direct(_) => match &self.layout.abi {
+            PassMode::Direct(_) => match self.layout.abi {
                 Abi::Scalar(scalar) => {
-                    (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
+                    (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
                 }
                 Abi::Vector { .. } => {
                     let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
@@ -149,10 +149,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
             },
-            PassMode::Pair(_, _) => match &self.layout.abi {
+            PassMode::Pair(_, _) => match self.layout.abi {
                 Abi::ScalarPair(a, b) => {
-                    let a = scalar_to_clif_type(tcx, a.clone());
-                    let b = scalar_to_clif_type(tcx, b.clone());
+                    let a = scalar_to_clif_type(tcx, a);
+                    let b = scalar_to_clif_type(tcx, b);
                     (None, vec![AbiParam::new(a), AbiParam::new(b)])
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 12f61e0c564aa..8b62b44df8ab3 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -143,8 +143,8 @@ macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 }
 
 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
-    let (element, count) = match &layout.abi {
-        Abi::Vector { element, count } => (element.clone(), *count),
+    let (element, count) = match layout.abi {
+        Abi::Vector { element, count } => (element, count),
         _ => unreachable!(),
     };
 
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 364b3da92b888..30d5340935f14 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -49,11 +49,7 @@ fn codegen_field<'tcx>(
     }
 }
 
-fn scalar_pair_calculate_b_offset(
-    tcx: TyCtxt<'_>,
-    a_scalar: &Scalar,
-    b_scalar: &Scalar,
-) -> Offset32 {
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
     let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
     Offset32::new(b_offset.bytes().try_into().unwrap())
 }
@@ -124,12 +120,10 @@ impl<'tcx> CValue<'tcx> {
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
                 let clif_ty = match layout.abi {
-                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
-                    Abi::Vector { ref element, count } => {
-                        scalar_to_clif_type(fx.tcx, element.clone())
-                            .by(u16::try_from(count).unwrap())
-                            .unwrap()
-                    }
+                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+                        .by(u16::try_from(count).unwrap())
+                        .unwrap(),
                     _ => unreachable!("{:?}", layout.ty),
                 };
                 let mut flags = MemFlags::new();
@@ -147,13 +141,13 @@ impl<'tcx> CValue<'tcx> {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let (a_scalar, b_scalar) = match &layout.abi {
+                let (a_scalar, b_scalar) = match layout.abi {
                     Abi::ScalarPair(a, b) => (a, b),
                     _ => unreachable!("load_scalar_pair({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
-                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                 let mut flags = MemFlags::new();
                 flags.set_notrap();
                 let val1 = ptr.load(fx, clif_ty1, flags);
@@ -564,7 +558,7 @@ impl<'tcx> CPlace<'tcx> {
                 to_ptr.store(fx, val, flags);
                 return;
             }
-            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+            Abi::ScalarPair(a_scalar, b_scalar) => {
                 let (value, extra) = from.load_scalar_pair(fx);
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                 to_ptr.store(fx, value, flags);
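
An aside on the cranelift hunks above: `scalar_pair_calculate_b_offset` encodes the ScalarPair layout rule — the second scalar lives at the first scalar's size rounded up to the second scalar's ABI alignment. A standalone sketch of that arithmetic, with plain `u64`s standing in for rustc's `Size`/`Align` types (`align_to` and `b_offset` here are local illustrations, not the compiler's API):

// Round `offset` up to the next multiple of `align` (a power of two).
// Mirrors what Size::align_to does inside the compiler.
fn align_to(offset: u64, align: u64) -> u64 {
    (offset + align - 1) & !(align - 1)
}

// Offset of the second scalar in a ScalarPair: the first scalar's size,
// aligned up to the second scalar's ABI alignment.
fn b_offset(a_size: u64, b_align: u64) -> u64 {
    align_to(a_size, b_align)
}

fn main() {
    assert_eq!(b_offset(1, 4), 4); // e.g. (u8, u32): the u32 starts at byte 4
    assert_eq!(b_offset(8, 4), 8); // e.g. (u64, u32): the u32 starts at byte 8
}
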
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index cd55a61cbaf9d..824bcd0383cd7 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -536,16 +536,13 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             }
             _ => {}
         }
-        if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
+        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
             if let Int(..) = scalar.value {
-                if !scalar.is_bool() {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(callsite, range);
-                    }
+                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
+                    bx.range_metadata(callsite, scalar.valid_range);
                 }
             }
         }
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index e0d312727a57d..9690ad8b24603 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -792,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
 
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
-fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
+fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
     match scalar.value {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -812,7 +812,7 @@ fn llvm_fixup_input(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
@@ -835,7 +835,7 @@ fn llvm_fixup_input(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count);
+            let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
@@ -890,7 +890,7 @@ fn llvm_fixup_output(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 bx.extract_element(value, bx.const_i32(0))
@@ -910,8 +910,8 @@ fn llvm_fixup_output(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
-            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
+            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
@@ -965,7 +965,7 @@ fn llvm_fixup_output_type(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Type {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 cx.type_vector(cx.type_i8(), 8)
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index da24fe08f0dfd..799f9a57e93a2 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -18,12 +18,12 @@ use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
-use rustc_target::abi::{self, Align, Size};
+use rustc_target::abi::{self, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
 use std::ffi::CStr;
 use std::iter;
-use std::ops::{Deref, Range};
+use std::ops::Deref;
 use std::ptr;
 use tracing::debug;
 
@@ -382,7 +382,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             val
         }
    }
-    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
             return self.trunc(val, self.cx().type_i1());
         }
@@ -460,16 +460,15 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         fn scalar_load_metadata<'a, 'll, 'tcx>(
             bx: &mut Builder<'a, 'll, 'tcx>,
             load: &'ll Value,
-            scalar: &abi::Scalar,
+            scalar: abi::Scalar,
         ) {
             match scalar.value {
                 abi::Int(..) => {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(load, range);
+                    if !scalar.is_always_valid(bx) {
+                        bx.range_metadata(load, scalar.valid_range);
                     }
                 }
-                abi::Pointer if !scalar.valid_range.contains_zero() => {
+                abi::Pointer if !scalar.valid_range.contains(0) => {
                     bx.nonnull_metadata(load);
                 }
                 _ => {}
@@ -489,17 +488,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
-                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
             let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);
 
-            let mut load = |i, scalar: &abi::Scalar, align| {
+            let mut load = |i, scalar: abi::Scalar, align| {
                 let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -555,7 +554,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         next_bx
     }
 
-    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
+    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
         if self.sess().target.arch == "amdgpu" {
             // amdgpu/LLVM does something weird and thinks an i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
@@ -568,7 +567,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let llty = self.cx.val_ty(load);
         let v = [
             self.cx.const_uint_big(llty, range.start),
-            self.cx.const_uint_big(llty, range.end),
+            self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
         ];
 
         llvm::LLVMSetMetadata(
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index cee582aec95eb..73a8d46443163 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -228,7 +228,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         })
     }
 
-    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index e673b06f15527..ef3a90fdecaaa 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -111,7 +111,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
                     Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                     &cx.tcx,
                 ),
-                &Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+                Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
                 cx.type_i8p_ext(address_space),
             ));
             next_offset = offset + pointer_size;
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 9a6391443dd01..f913c3e4b703d 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -1656,7 +1656,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
             Variants::Multiple {
                 tag_encoding:
                     TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
-                ref tag,
+                tag,
                 ref variants,
                 tag_field,
             } => {
@@ -2082,10 +2082,8 @@ fn prepare_enum_metadata(
 
     let layout = cx.layout_of(enum_type);
 
-    if let (
-        &Abi::Scalar(_),
-        &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
-    ) = (&layout.abi, &layout.variants)
+    if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
+        (layout.abi, &layout.variants)
     {
         return FinalMetadata(discriminant_type_metadata(tag.value));
     }
@@ -2093,8 +2091,8 @@ fn prepare_enum_metadata(
     if use_enum_fallback(cx) {
         let discriminant_type_metadata = match layout.variants {
             Variants::Single { .. } => None,
-            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. }
-            | Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => {
+            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
+            | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
                 Some(discriminant_type_metadata(tag.value))
             }
         };
@@ -2146,9 +2144,7 @@ fn prepare_enum_metadata(
         // A single-variant enum has no discriminant.
         Variants::Single { .. } => None,
 
-        Variants::Multiple {
-            tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
-        } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
             // Find the integer type of the correct size.
             let size = tag.value.size(cx);
             let align = tag.value.align(cx);
@@ -2179,7 +2175,7 @@ fn prepare_enum_metadata(
             }
         }
 
-        Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
             let discr_type = tag.value.to_ty(cx.tcx);
             let (size, align) = cx.size_and_align_of(discr_type);
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 1aa52d975e9a0..1060f911a9ed4 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -133,7 +133,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(ref scalar) => {
+                    abi::Abi::Scalar(scalar) => {
                         match scalar.value {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
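
A note on the builder.rs hunks above: `range_metadata` now takes the inclusive `WrappingRange` directly instead of a half-open `Range<u128>`. LLVM `!range` metadata still wants a half-open pair, so the exclusive bound is computed at emission time with a wrapping increment; the constant is truncated to the integer's width when emitted (that is what makes `end + 1` wrap to 0 for a full-width `end`). A standalone sketch with `WrappingRange` redefined locally and the width truncation modeled by a mask — illustration only, not the compiler's code:

// Local stand-in for rustc_target::abi::WrappingRange: an inclusive
// start..=end range that is allowed to wrap around (start > end).
#[derive(Clone, Copy, Debug, PartialEq)]
struct WrappingRange {
    start: u128,
    end: u128,
}

// LLVM !range metadata is the half-open pair [start, end + 1), with both
// constants truncated to the integer's bit width; the mask models that
// truncation, so end = MAX wraps the exclusive bound around to 0.
fn llvm_range_operands(range: WrappingRange, bits: u32) -> (u128, u128) {
    let mask = u128::MAX >> (128 - bits);
    (range.start & mask, range.end.wrapping_add(1) & mask)
}

fn main() {
    // bool stored as i8: valid values 0..=1 emit !range 0, 2.
    assert_eq!(llvm_range_operands(WrappingRange { start: 0, end: 1 }, 8), (0, 2));
    // A NonZeroU8-style niche 1..=255 emits !range 1, 0.
    assert_eq!(llvm_range_operands(WrappingRange { start: 1, end: 255 }, 8), (1, 0));
}
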
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index d615d230ea0d1..3e39bf3e995a2 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 ) -> &'a Type {
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { ref element, count } => {
+        Abi::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
             return cx.type_vector(element, count);
         }
@@ -177,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> {
     fn scalar_llvm_type_at<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
-        scalar: &Scalar,
+        scalar: Scalar,
         offset: Size,
     ) -> &'a Type;
     fn scalar_pair_element_llvm_type<'a>(
@@ -218,7 +218,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let Abi::Scalar(scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -286,7 +286,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let Abi::Scalar(scalar) = self.abi {
             if scalar.is_bool() {
                 return cx.type_i1();
             }
@@ -297,7 +297,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn scalar_llvm_type_at<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
-        scalar: &Scalar,
+        scalar: Scalar,
         offset: Size,
     ) -> &'a Type {
         match scalar.value {
@@ -337,7 +337,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         }
 
         let (a, b) = match self.abi {
-            Abi::ScalarPair(ref a, ref b) => (a, b),
+            Abi::ScalarPair(a, b) => (a, b),
             _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
         };
         let scalar = [a, b][index];
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index f0b32c96309d6..e842f5e9391c8 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -404,7 +404,7 @@ fn push_debuginfo_type_name<'tcx>(
 
                 // calculate the range of values for the dataful variant
                 let dataful_discriminant_range =
-                    &dataful_variant_layout.largest_niche.as_ref().unwrap().scalar.valid_range;
+                    dataful_variant_layout.largest_niche.unwrap().scalar.valid_range;
 
                 let min = dataful_discriminant_range.start;
                 let min = tag.value.size(&tcx).truncate(min);
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 2ca7e8fd7e8e3..cd0e9354c2441 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -20,7 +20,7 @@ use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
 use rustc_span::source_map::Span;
 use rustc_span::{sym, Symbol};
 use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
-use rustc_target::abi::{self, HasDataLayout};
+use rustc_target::abi::{self, HasDataLayout, WrappingRange};
 use rustc_target::spec::abi::Abi;
 
 /// Used by `FunctionCx::codegen_terminator` for emitting common patterns
@@ -1102,9 +1102,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // the load would just produce `OperandValue::Ref` instead
                         // of the `OperandValue::Immediate` we need for the call.
                         llval = bx.load(bx.backend_type(arg.layout), llval, align);
-                        if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+                        if let abi::Abi::Scalar(scalar) = arg.layout.abi {
                             if scalar.is_bool() {
-                                bx.range_metadata(llval, 0..2);
+                                bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                             }
                         }
                         // We store bools as `i8` so we need to truncate to `i1`.
@@ -1424,7 +1424,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let src = self.codegen_operand(bx, src);
 
         // Special-case transmutes between scalars as simple bitcasts.
-        match (&src.layout.abi, &dst.layout.abi) {
+        match (src.layout.abi, dst.layout.abi) {
             (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
                 // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
                 if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) {
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index fa8a53e60b169..93b39dc8e9ee1 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -68,7 +68,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         if let Some(prim) = field.val.try_to_scalar() {
                             let layout = bx.layout_of(field_ty);
                             let scalar = match layout.abi {
-                                Abi::Scalar(ref x) => x,
+                                Abi::Scalar(x) => x,
                                 _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
                             };
                             bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 5cee25b5cca3b..ce6cec67ad41e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -79,7 +79,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let val = match val {
             ConstValue::Scalar(x) => {
                 let scalar = match layout.abi {
-                    Abi::Scalar(ref x) => x,
+                    Abi::Scalar(x) => x,
                     _ => bug!("from_const: invalid ByVal layout: {:#?}", layout),
                 };
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
@@ -87,7 +87,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
             ConstValue::Slice { data, start, end } => {
                 let a_scalar = match layout.abi {
-                    Abi::ScalarPair(ref a, _) => a,
+                    Abi::ScalarPair(a, _) => a,
                     _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout),
                 };
                 let a = Scalar::from_pointer(
@@ -162,7 +162,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         llval: V,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        let val = if let Abi::ScalarPair(ref a, ref b) = layout.abi {
+        let val = if let Abi::ScalarPair(a, b) = layout.abi {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
 
             // Deconstruct the immediate aggregate.
@@ -185,7 +185,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);
 
-        let mut val = match (self.val, &self.layout.abi) {
+        let mut val = match (self.val, self.layout.abi) {
             // If the field is ZST, it has no data.
             _ if field.is_zst() => {
                 return OperandRef::new_zst(bx, field);
@@ -200,7 +200,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
 
             // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), &Abi::ScalarPair(ref a, ref b)) => {
+            (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
                 if offset.bytes() == 0 {
                     assert_eq!(field.size, a.value.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
@@ -212,14 +212,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
 
             // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), &Abi::Vector { .. }) => {
+            (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
                 OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
             }
 
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
         };
 
-        match (&mut val, &field.abi) {
+        match (&mut val, field.abi) {
             (OperandValue::Immediate(llval), _) => {
                 // Bools in union fields needs to be truncated.
                 *llval = bx.to_immediate(*llval, field);
@@ -308,7 +308,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             }
             OperandValue::Pair(a, b) => {
                 let (a_scalar, b_scalar) = match dest.layout.abi {
-                    Abi::ScalarPair(ref a, ref b) => (a, b),
+                    Abi::ScalarPair(a, b) => (a, b),
                     _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
                 };
                 let ty = bx.backend_type(dest.layout);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index b48d6d42b4357..4b07ed1a1e6c2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -99,7 +99,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                 self.llval
             }
-            Abi::ScalarPair(ref a, ref b)
+            Abi::ScalarPair(a, b)
                 if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
             {
                 // Offset matches second field.
@@ -222,7 +222,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                     .map_or(index.as_u32() as u128, |discr| discr.val);
                 return bx.cx().const_uint_big(cast_to, discr_val);
             }
-            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                 (tag, tag_encoding, tag_field)
             }
         };
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 3d8ea29160bd0..f65af17535abd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -300,7 +300,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let llval = operand.immediate();
 
                 let mut signed = false;
-                if let Abi::Scalar(ref scalar) = operand.layout.abi {
+                if let Abi::Scalar(scalar) = operand.layout.abi {
                     if let Int(_, s) = scalar.value {
                         // We use `i1` for bytes that are always `0` or `1`,
                         // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
@@ -308,8 +308,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                         signed = !scalar.is_bool() && s;
 
-                        let er = scalar.valid_range_exclusive(bx.cx());
-                        if er.end != er.start
+                        if !scalar.is_always_valid(bx.cx())
+                            && scalar.valid_range.end >= scalar.valid_range.start
                         {
                             // We want `table[e as usize ± k]` to not
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index afb8ee3c40d55..e7da96f0adafd 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -16,11 +16,9 @@ use crate::MemFlags;
 use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
 use rustc_middle::ty::Ty;
 use rustc_span::Span;
-use rustc_target::abi::{Abi, Align, Scalar, Size};
+use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
 use rustc_target::spec::HasTargetSpec;
 
-use std::ops::Range;
-
 #[derive(Copy, Clone)]
 pub enum OverflowOp {
     Add,
@@ -126,13 +124,13 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
     fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
-        if let Abi::Scalar(ref scalar) = layout.abi {
+        if let Abi::Scalar(scalar) = layout.abi {
             self.to_immediate_scalar(val, scalar)
         } else {
             val
         }
     }
-    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &Scalar) -> Self::Value;
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
 
     fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
     fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
@@ -158,7 +156,7 @@ pub trait BuilderMethods<'a, 'tcx>:
         dest: PlaceRef<'tcx, Self::Value>,
     ) -> Self;
 
-    fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
+    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
     fn nonnull_metadata(&mut self, load: Self::Value);
 
     fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index 20f6618712327..5260b7cc33120 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -28,7 +28,7 @@ pub trait ConstMethods<'tcx>: BackendTypes {
 
     fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value;
 
-    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: Self::Type) -> Self::Value;
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
     fn from_const_alloc(
         &self,
         layout: TyAndLayout<'tcx>,
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 07e974b72664b..1e91ad07ba93a 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -194,7 +194,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let val = self.read_scalar(&args[0])?.check_init()?;
                 let bits = val.to_bits(layout_of.size)?;
                 let kind = match layout_of.abi {
-                    Abi::Scalar(ref scalar) => scalar.value,
+                    Abi::Scalar(scalar) => scalar.value,
                     _ => span_bug!(
                         self.cur_span(),
                         "{} called on invalid type {:?}",
@@ -234,7 +234,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     &r,
                 )?;
                 let val = if overflowed {
-                    let num_bits = l.layout.size.bits();
+                    let size = l.layout.size;
+                    let num_bits = size.bits();
                     if l.layout.abi.is_signed() {
                         // For signed ints the saturated value depends on the sign of the first
                         // term since the sign of the second term can be inferred from this and
@@ -259,10 +260,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         // unsigned
                         if is_add {
                             // max unsigned
-                            Scalar::from_uint(
-                                u128::MAX >> (128 - num_bits),
-                                Size::from_bits(num_bits),
-                            )
+                            Scalar::from_uint(size.unsigned_int_max(), Size::from_bits(num_bits))
                         } else {
                             // underflow to 0
                             Scalar::from_uint(0u128, Size::from_bits(num_bits))
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 63aca67c9443c..de870bd5c6cf1 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -274,11 +274,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
                 Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
             }
-            Abi::ScalarPair(ref a, ref b) => {
+            Abi::ScalarPair(a, b) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
                 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
-                let (a, b) = (&a.value, &b.value);
+                let (a, b) = (a.value, b.value);
                 let (a_size, b_size) = (a.size(self), b.size(self));
                 let b_offset = a_size.align_to(b.align(self).abi);
                 assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
@@ -648,7 +648,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 };
                 return Ok((discr, index));
             }
-            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
+            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                 (tag, tag_encoding, tag_field)
             }
         };
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 95a44e3fecf3d..0da6d8169bd3a 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -752,7 +752,7 @@ where
                 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                 let (a, b) = match dest.layout.abi {
-                    Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
+                    Abi::ScalarPair(a, b) => (a.value, b.value),
                     _ => span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
@@ -1000,7 +1000,7 @@ where
             }
             Variants::Multiple {
                 tag_encoding: TagEncoding::Direct,
-                tag: ref tag_layout,
+                tag: tag_layout,
                 tag_field,
                 ..
             } => {
@@ -1022,7 +1022,7 @@ where
             Variants::Multiple {
                 tag_encoding:
                     TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
-                tag: ref tag_layout,
+                tag: tag_layout,
                 tag_field,
                 ..
             } => {
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 1c8e5e9e23c3c..a06903aedf649 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -187,17 +187,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             return false;
         }
         // Compare layout
-        match (&caller.abi, &callee.abi) {
+        match (caller.abi, callee.abi) {
             // Different valid ranges are okay (once we enforce validity,
             // that will take care to make it UB to leave the range, just
             // like for transmute).
-            (abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => {
-                caller.value == callee.value
+            (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => caller.value == callee.value,
+            (abi::Abi::ScalarPair(caller1, caller2), abi::Abi::ScalarPair(callee1, callee2)) => {
+                caller1.value == callee1.value && caller2.value == callee2.value
             }
-            (
-                abi::Abi::ScalarPair(ref caller1, ref caller2),
-                abi::Abi::ScalarPair(ref callee1, ref callee2),
-            ) => caller1.value == callee1.value && caller2.value == callee2.value,
             // Be conservative
             _ => false,
         }
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 03e0a8e7901d9..a6375ad0e02cf 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -618,40 +618,38 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
     fn visit_scalar(
         &mut self,
         op: &OpTy<'tcx, M::PointerTag>,
-        scalar_layout: &ScalarAbi,
+        scalar_layout: ScalarAbi,
     ) -> InterpResult<'tcx> {
-        let value = self.read_scalar(op)?;
-        let valid_range = scalar_layout.valid_range.clone();
-        let WrappingRange { start: lo, end: hi } = valid_range;
-        // Determine the allowed range
-        // `max_hi` is as big as the size fits
-        let max_hi = u128::MAX >> (128 - op.layout.size.bits());
-        assert!(hi <= max_hi);
-        // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
-        if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
+        if scalar_layout.valid_range.is_full_for(op.layout.size) {
             // Nothing to check
             return Ok(());
         }
-        // At least one value is excluded. Get the bits.
+        // At least one value is excluded.
+        let valid_range = scalar_layout.valid_range;
+        let WrappingRange { start, end } = valid_range;
+        let max_value = op.layout.size.unsigned_int_max();
+        assert!(end <= max_value);
+        // Determine the allowed range
+        let value = self.read_scalar(op)?;
         let value = try_validation!(
             value.check_init(),
             self.path,
             err_ub!(InvalidUninitBytes(None)) => { "{}", value }
-                expected { "something {}", wrapping_range_format(valid_range, max_hi) },
+                expected { "something {}", wrapping_range_format(valid_range, max_value) },
         );
         let bits = match value.try_to_int() {
             Err(_) => {
                 // So this is a pointer then, and casting to an int failed.
                 // Can only happen during CTFE.
                 let ptr = self.ecx.scalar_to_ptr(value);
-                if lo == 1 && hi == max_hi {
+                if start == 1 && end == max_value {
                     // Only null is the niche. So make sure the ptr is NOT null.
                     if self.ecx.memory.ptr_may_be_null(ptr) {
                         throw_validation_failure!(self.path,
                             { "a potentially null pointer" }
                             expected {
                                 "something that cannot possibly fail to be {}",
-                                wrapping_range_format(valid_range, max_hi)
+                                wrapping_range_format(valid_range, max_value)
                             }
                         )
                     }
@@ -663,7 +661,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     { "a pointer" }
                     expected {
                         "something that cannot possibly fail to be {}",
-                        wrapping_range_format(valid_range, max_hi)
+                        wrapping_range_format(valid_range, max_value)
                     }
                 )
             }
@@ -676,7 +674,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 } else {
                     throw_validation_failure!(self.path,
                         { "{}", bits }
-                        expected { "something {}", wrapping_range_format(valid_range, max_hi) }
+                        expected { "something {}", wrapping_range_format(valid_range, max_value) }
                     )
                 }
             }
@@ -786,7 +784,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     { "a value of uninhabited type {:?}", op.layout.ty }
                 );
             }
-            Abi::Scalar(ref scalar_layout) => {
+            Abi::Scalar(scalar_layout) => {
                 self.visit_scalar(op, scalar_layout)?;
             }
             Abi::ScalarPair { .. } | Abi::Vector { .. } => {
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 480f7756db3fe..0143978cfba5f 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -1327,10 +1327,7 @@ impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
         };
         let (variants, tag) = match layout.variants {
             Variants::Multiple {
-                tag_encoding: TagEncoding::Direct,
-                ref tag,
-                ref variants,
-                ..
+                tag_encoding: TagEncoding::Direct, tag, ref variants, ..
             } => (variants, tag),
             _ => return,
         };
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index f7ab9dd82ac73..1bea1cbc3b94b 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -290,9 +290,9 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
 
         // HACK(nox): We iter on `b` and then `a` because `max_by_key`
         // returns the last maximum.
-        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
+        let largest_niche = Niche::from_scalar(dl, b_offset, b)
             .into_iter()
-            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
+            .chain(Niche::from_scalar(dl, Size::ZERO, a))
             .max_by_key(|niche| niche.available(dl));
 
         Layout {
@@ -401,7 +401,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             offsets[i as usize] = offset;
 
             if !repr.hide_niche() {
-                if let Some(mut niche) = field.largest_niche.clone() {
+                if let Some(mut niche) = field.largest_niche {
                     let available = niche.available(dl);
                     if available > largest_niche_available {
                         largest_niche_available = available;
@@ -449,12 +449,12 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 // For plain scalars, or vectors of them, we can't unpack
                 // newtypes for `#[repr(C)]`, as that affects C ABIs.
                 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                    abi = field.abi.clone();
+                    abi = field.abi;
                 }
                 // But scalar pairs are Rust-specific and get
                 // treated as aggregates by C ABIs anyway.
                 Abi::ScalarPair(..) => {
-                    abi = field.abi.clone();
+                    abi = field.abi;
                 }
                 _ => {}
             }
@@ -463,14 +463,14 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
 
             // Two non-ZST fields, and they're both scalars.
             (
-                Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
-                Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
+                Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
+                Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
                 None,
             ) => {
                 // Order by the memory placement, not source order.
                 let ((i, a), (j, b)) =
                     if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
-                let pair = self.scalar_pair(a.clone(), b.clone());
+                let pair = self.scalar_pair(a, b);
                 let pair_offsets = match pair.fields {
                     FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                         assert_eq!(memory_index, &[0, 1]);
@@ -512,9 +512,9 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
         let param_env = self.param_env;
         let dl = self.data_layout();
         let scalar_unit = |value: Primitive| {
-            let bits = value.size(dl).bits();
-            assert!(bits <= 128);
-            Scalar { value, valid_range: WrappingRange { start: 0, end: (!0 >> (128 - bits)) } }
+            let size = value.size(dl);
+            assert!(size.bits() <= 128);
+            Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
         };
         let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
 
@@ -609,7 +609,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     Abi::Aggregate { sized: true }
                 };
 
-                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
+                let largest_niche = if count != 0 { element.largest_niche } else { None };
 
                 tcx.intern_layout(Layout {
                     variants: Variants::Single { index: VariantIdx::new(0) },
@@ -768,8 +768,8 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
 
                 // Compute the ABI of the element type:
                 let e_ly = self.layout_of(e_ty)?;
-                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
-                    scalar.clone()
+                let e_abi = if let Abi::Scalar(scalar) = e_ly.abi {
+                    scalar
                 } else {
                     // This error isn't caught in typeck, e.g., if
                     // the element type of the vector is generic.
@@ -796,7 +796,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     variants: Variants::Single { index: VariantIdx::new(0) },
                     fields,
                     abi: Abi::Vector { element: e_abi, count: e_len },
-                    largest_niche: e_ly.largest_niche.clone(),
+                    largest_niche: e_ly.largest_niche,
                     size,
                     align,
                 })
@@ -843,13 +843,13 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         // If all non-ZST fields have the same ABI, forward this ABI
                         if optimize && !field.is_zst() {
                             // Normalize scalar_unit to the maximal valid range
-                            let field_abi = match &field.abi {
+                            let field_abi = match field.abi {
                                 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                 Abi::ScalarPair(x, y) => {
                                     Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                 }
                                 Abi::Vector { element: x, count } => {
-                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
+                                    Abi::Vector { element: scalar_unit(x.value), count }
                                 }
                                 Abi::Uninhabited | Abi::Aggregate { .. } => {
                                     Abi::Aggregate { sized: true }
@@ -970,7 +970,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                     };
                     if let Some(niche) = niche {
-                        match &st.largest_niche {
+                        match st.largest_niche {
                             Some(largest_niche) => {
                                 // Replace the existing niche even if they're equal,
                                 // because this one is at a lower offset.
@@ -1045,7 +1045,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     let niche_candidate = variants[i]
                         .iter()
                         .enumerate()
-                        .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
+                        .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                         .max_by_key(|(_, niche)| niche.available(dl));
 
                     if let Some((field_index, niche, (niche_start, niche_scalar))) =
@@ -1078,31 +1078,24 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                             Abi::Uninhabited
                         } else {
                             match st[i].abi {
-                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
-                                Abi::ScalarPair(ref first, ref second) => {
+                                Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+                                Abi::ScalarPair(first, second) => {
                                     // We need to use scalar_unit to reset the
                                     // valid range to the maximal one for that
                                     // primitive, because only the niche is
                                     // guaranteed to be initialised, not the
                                     // other primitive.
                                     if offset.bytes() == 0 {
-                                        Abi::ScalarPair(
-                                            niche_scalar.clone(),
-                                            scalar_unit(second.value),
-                                        )
+                                        Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
                                     } else {
-                                        Abi::ScalarPair(
-                                            scalar_unit(first.value),
-                                            niche_scalar.clone(),
-                                        )
+                                        Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
                                     }
                                 }
                                 _ => Abi::Aggregate { sized: true },
                             }
                         };
 
-                        let largest_niche =
-                            Niche::from_scalar(dl, offset, niche_scalar.clone());
+                        let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
 
                         niche_filling_layout = Some(Layout {
                             variants: Variants::Multiple {
@@ -1273,7 +1266,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 }
             }
 
-            let tag_mask = !0u128 >> (128 - ity.size().bits());
+            let tag_mask = ity.size().unsigned_int_max();
             let tag = Scalar {
                 value: Int(ity, signed),
                 valid_range: WrappingRange {
@@ -1283,7 +1276,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             };
             let mut abi = Abi::Aggregate { sized: true };
             if tag.value.size(dl) == size {
-                abi = Abi::Scalar(tag.clone());
+                abi = Abi::Scalar(tag);
             } else {
                 // Try to use a ScalarPair for all tagged enums.
                 let mut common_prim = None;
@@ -1303,7 +1296,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         }
                     };
                     let prim = match field.abi {
-                        Abi::Scalar(ref scalar) => scalar.value,
+                        Abi::Scalar(scalar) => scalar.value,
                         _ => {
                             common_prim = None;
                             break;
@@ -1323,7 +1316,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     }
                 }
                 if let Some((prim, offset)) = common_prim {
-                    let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
+                    let pair = self.scalar_pair(tag, scalar_unit(prim));
                    let pair_offsets = match pair.fields {
                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                             assert_eq!(memory_index, &[0, 1]);
@@ -1347,7 +1340,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 abi = Abi::Uninhabited;
             }
 
-            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
+            let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
 
             let tagged_layout = Layout {
                 variants: Variants::Multiple {
@@ -1372,8 +1365,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 // pick the layout with the larger niche; otherwise,
                 // pick tagged as it has simpler codegen.
                 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
-                    let niche_size =
-                        layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
+                    let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
                     (layout.size, cmp::Reverse(niche_size))
                 })
             }
@@ -1560,7 +1552,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             value: Primitive::Int(discr_int, false),
             valid_range: WrappingRange { start: 0, end: max_discr },
         };
-        let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
+        let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
 
         let promoted_layouts = ineligible_locals
@@ -1832,7 +1824,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 }
             }
 
-            Variants::Multiple { ref tag, ref tag_encoding, .. } => {
+            Variants::Multiple { tag, ref tag_encoding, .. } => {
                 debug!(
                     "print-type-size `{:#?}` adt general variants def {}",
                     layout.ty,
@@ -2240,7 +2232,7 @@ where
         i: usize,
     ) -> TyMaybeWithLayout<'tcx> {
         let tcx = cx.tcx();
-        let tag_layout = |tag: &Scalar| -> TyAndLayout<'tcx> {
+        let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
             let layout = Layout::scalar(cx, tag.clone());
             TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
         };
@@ -2329,7 +2321,7 @@ where
                             .nth(i)
                             .unwrap(),
                     ),
-                    Variants::Multiple { ref tag, tag_field, .. } => {
+                    Variants::Multiple { tag, tag_field, .. } => {
                         if i == tag_field {
                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                         }
@@ -2347,7 +2339,7 @@ where
                 }
 
                 // Discriminant field for enums (where applicable).
-                Variants::Multiple { ref tag, .. } => {
+                Variants::Multiple { tag, .. } => {
                     assert_eq!(i, 0);
                     return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                 }
@@ -2906,7 +2898,7 @@ where
 
         // Handle safe Rust thin and fat pointers.
         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
-                                      scalar: &Scalar,
+                                      scalar: Scalar,
                                       layout: TyAndLayout<'tcx>,
                                       offset: Size,
                                       is_return: bool| {
@@ -2921,7 +2913,7 @@ where
                 return;
             }
 
-            if !scalar.valid_range.contains_zero() {
+            if !scalar.valid_range.contains(0) {
                 attrs.set(ArgAttribute::NonNull);
             }
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index 1b8e94260b9b5..8a2abb0375798 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -45,18 +45,6 @@ impl<'tcx> fmt::Display for Discr<'tcx> {
     }
 }
 
-fn signed_min(size: Size) -> i128 {
-    size.sign_extend(1_u128 << (size.bits() - 1)) as i128
-}
-
-fn signed_max(size: Size) -> i128 {
-    i128::MAX >> (128 - size.bits())
-}
-
-fn unsigned_max(size: Size) -> u128 {
-    u128::MAX >> (128 - size.bits())
-}
-
 fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
     let (int, signed) = match *ty.kind() {
         Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
@@ -74,8 +62,8 @@ impl<'tcx> Discr<'tcx> {
     pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
         let (size, signed) = int_size_and_signed(tcx, self.ty);
         let (val, oflo) = if signed {
-            let min = signed_min(size);
-            let max = signed_max(size);
+            let min = size.signed_int_min();
+            let max = size.signed_int_max();
             let val = size.sign_extend(self.val) as i128;
             assert!(n < (i128::MAX as u128));
             let n = n as i128;
@@ -86,7 +74,7 @@ impl<'tcx> Discr<'tcx> {
             let val = size.truncate(val);
             (val, oflo)
         } else {
-            let max = unsigned_max(size);
+            let max = size.unsigned_int_max();
             let val = self.val;
             let oflo = val > max - n;
             let val = if oflo { n - (max - val) - 1 } else { val + n };
@@ -621,7 +609,8 @@ impl<'tcx> ty::TyS<'tcx> {
         let val = match self.kind() {
             ty::Int(_) | ty::Uint(_) => {
                 let (size, signed) = int_size_and_signed(tcx, self);
-                let val = if signed { signed_max(size) as u128 } else { unsigned_max(size) };
+                let val =
+                    if signed { size.signed_int_max() as u128 } else { size.unsigned_int_max() };
                 Some(val)
             }
             ty::Char => Some(std::char::MAX as u128),
@@ -640,7 +629,7 @@ impl<'tcx> ty::TyS<'tcx> {
         let val = match self.kind() {
             ty::Int(_) | ty::Uint(_) => {
                 let (size, signed) = int_size_and_signed(tcx, self);
-                let val = if signed { size.truncate(signed_min(size) as u128) } else { 0 };
+                let val = if signed { size.truncate(size.signed_int_min() as u128) } else { 0 };
                 Some(val)
             }
             ty::Char => Some(0),
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index 68de1af613d9f..be0d5d2f1b2af 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -494,9 +494,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     // Helper to get a `-1` value of the appropriate type
     fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
         let param_ty = ty::ParamEnv::empty().and(ty);
-        let bits = self.tcx.layout_of(param_ty).unwrap().size.bits();
-        let n = (!0u128) >> (128 - bits);
-        let literal = ty::Const::from_bits(self.tcx, n, param_ty);
+        let size = self.tcx.layout_of(param_ty).unwrap().size;
+        let literal = ty::Const::from_bits(self.tcx, size.unsigned_int_max(), param_ty);
 
         self.literal_operand(span, literal)
     }
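
The `ty/util.rs` hunk above deletes the free functions `signed_min`/`signed_max`/`unsigned_max`; they reappear as `Size::signed_int_min`/`signed_int_max`/`unsigned_int_max` in the `rustc_target/src/abi/mod.rs` hunks further down. The bit-twiddling is easy to sanity-check in isolation; a standalone sketch with the bit width passed explicitly (these free functions are illustrations of the arithmetic, not the new API):

// Largest value of an unsigned integer `bits` wide.
fn unsigned_int_max(bits: u32) -> u128 {
    u128::MAX >> (128 - bits)
}

// Largest value of a signed integer `bits` wide.
fn signed_int_max(bits: u32) -> i128 {
    i128::MAX >> (128 - bits)
}

// Smallest value of a signed integer `bits` wide: sign-extend the
// pattern 100...0 from `bits` bits up to 128 bits, as Size::sign_extend does.
fn signed_int_min(bits: u32) -> i128 {
    let shift = 128 - bits;
    (((1u128 << (bits - 1)) << shift) as i128) >> shift
}

fn main() {
    assert_eq!(unsigned_int_max(8), 255);
    assert_eq!(signed_int_max(8), 127);
    assert_eq!(signed_int_min(8), -128);
}
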
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
index 28ca93c46921e..2e00ffc7e1473 100644
--- a/compiler/rustc_target/src/abi/call/mips64.rs
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -3,7 +3,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
         if let abi::Int(i, signed) = scalar.value {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -23,7 +23,7 @@ where
     C: HasDataLayout,
 {
     match ret.layout.field(cx, i).abi {
-        abi::Abi::Scalar(ref scalar) => match scalar.value {
+        abi::Abi::Scalar(scalar) => match scalar.value {
             abi::F32 => Some(Reg::f32()),
             abi::F64 => Some(Reg::f64()),
             _ => None,
@@ -107,7 +107,7 @@ where
         let offset = arg.layout.fields.offset(i);
 
         // We only care about aligned doubles
-        if let abi::Abi::Scalar(ref scalar) = field.abi {
+        if let abi::Abi::Scalar(scalar) = field.abi {
             if let abi::F64 = scalar.value {
                 if offset.is_aligned(dl.f64_align.abi) {
                     // Insert enough integers to cover [last_offset, offset)
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 6d3c731809107..927bebd8157a6 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -322,7 +322,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 Abi::Uninhabited => Err(Heterogeneous),
 
                 // The primitive for this algorithm.
-                Abi::Scalar(ref scalar) => {
+                Abi::Scalar(scalar) => {
                     let kind = match scalar.value {
                         abi::Int(..) | abi::Pointer => RegKind::Integer,
                         abi::F32 | abi::F64 => RegKind::Float,
@@ -450,9 +450,9 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     pub fn new(
         cx: &impl HasDataLayout,
         layout: TyAndLayout<'a, Ty>,
-        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, &abi::Scalar, Size) -> ArgAttributes,
+        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
     ) -> Self {
-        let mode = match &layout.abi {
+        let mode = match layout.abi {
             Abi::Uninhabited => PassMode::Ignore,
             Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
             Abi::ScalarPair(a, b) => PassMode::Pair(
@@ -504,7 +504,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
 
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Abi::Scalar(ref scalar) = self.layout.abi {
+        if let Abi::Scalar(scalar) = self.layout.abi {
             if let abi::Int(i, signed) = scalar.value {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index 8c2ef8c7a017d..bbefc73a076d7 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -44,7 +44,7 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
     match arg_layout.abi {
-        Abi::Scalar(ref scalar) => match scalar.value {
+        Abi::Scalar(scalar) => match scalar.value {
             abi::Int(..) | abi::Pointer => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -297,7 +297,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
-    if let Abi::Scalar(ref scalar) = arg.layout.abi {
+    if let Abi::Scalar(scalar) = arg.layout.abi {
         if let abi::Int(i, _) = scalar.value {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
index 594108925de80..38aaee64a4d6b 100644
--- a/compiler/rustc_target/src/abi/call/s390x.rs
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -18,7 +18,7 @@ where
     C: HasDataLayout,
 {
     match layout.abi {
-        abi::Abi::Scalar(ref scalar) => scalar.value.is_float(),
+        abi::Abi::Scalar(scalar) => scalar.value.is_float(),
         abi::Abi::Aggregate { .. } => {
             if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
                 is_single_fp_element(cx, layout.field(cx, 0))
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index 3fc197b5d7537..28064d85bf171 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -14,7 +14,7 @@ where
     C: HasDataLayout,
 {
     match layout.abi {
-        abi::Abi::Scalar(ref scalar) => scalar.value.is_float(),
+        abi::Abi::Scalar(scalar) => scalar.value.is_float(),
         abi::Abi::Aggregate { .. } => {
             if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
                 is_single_fp_element(cx, layout.field(cx, 0))
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
index 08f0aaba59af4..fae3c3af61bf0 100644
--- a/compiler/rustc_target/src/abi/call/x86_64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -49,7 +49,7 @@ where
         let mut c = match layout.abi {
             Abi::Uninhabited => return Ok(()),
 
-            Abi::Scalar(ref scalar) => match scalar.value {
+            Abi::Scalar(scalar) => match scalar.value {
                 abi::Int(..) | abi::Pointer => Class::Int,
                 abi::F32 | abi::F64 => Class::Sse,
             },
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index b0ecd117dd20e..af75007b09ae5 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -7,7 +7,7 @@ use std::convert::{TryFrom, TryInto};
 use std::fmt;
 use std::iter::Step;
 use std::num::NonZeroUsize;
-use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};
+use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
 use std::str::FromStr;
 
 use rustc_index::vec::{Idx, IndexVec};
@@ -392,6 +392,21 @@ impl Size {
         // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
         (value << shift) >> shift
     }
+
+    #[inline]
+    pub fn signed_int_min(&self) -> i128 {
+        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
+    }
+
+    #[inline]
+    pub fn signed_int_max(&self) -> i128 {
+        i128::MAX >> (128 - self.bits())
+    }
+
+    #[inline]
+    pub fn unsigned_int_max(&self) -> u128 {
+        u128::MAX >> (128 - self.bits())
+    }
 }
 
 // Panicking addition, subtraction and multiplication for convenience.
@@ -739,9 +754,8 @@ impl Primitive {
 ///
 ///    254 (-2), 255 (-1), 0, 1, 2
 ///
-/// This is intended specifically to mirror LLVM’s `!range` metadata,
-/// semantics.
-#[derive(Clone, PartialEq, Eq, Hash)]
+/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
 #[derive(HashStable_Generic)]
 pub struct WrappingRange {
     pub start: u128,
@@ -759,13 +773,6 @@ impl WrappingRange {
         }
     }
 
-    /// Returns `true` if zero is contained in the range.
-    /// Equal to `range.contains(0)` but should be faster.
-    #[inline(always)]
-    pub fn contains_zero(&self) -> bool {
-        self.start > self.end || self.start == 0
-    }
-
     /// Returns `self` with replaced `start`
     #[inline(always)]
     pub fn with_start(mut self, start: u128) -> Self {
@@ -779,17 +786,29 @@ impl WrappingRange {
         self.end = end;
         self
     }
+
+    /// Returns `true` if `size` completely fills the range.
+    #[inline]
+    pub fn is_full_for(&self, size: Size) -> bool {
+        let max_value = size.unsigned_int_max();
+        debug_assert!(self.start <= max_value && self.end <= max_value);
+        self.start == (self.end.wrapping_add(1) & max_value)
+    }
 }
 
 impl fmt::Debug for WrappingRange {
     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(fmt, "{}..={}", self.start, self.end)?;
+        if self.start > self.end {
+            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
+        } else {
+            write!(fmt, "{}..={}", self.start, self.end)?;
+        }
         Ok(())
     }
 }
 
 /// Information about one scalar component of a Rust type.
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 #[derive(HashStable_Generic)]
 pub struct Scalar {
     pub value: Primitive,
@@ -803,25 +822,16 @@ impl Scalar {
     #[inline]
     pub fn is_bool(&self) -> bool {
-        matches!(self.value, Int(I8, false))
-            && matches!(self.valid_range, WrappingRange { start: 0, end: 1 })
+        matches!(
+            self,
+            Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } }
+        )
     }
 
-    /// Returns the valid range as a `x..y` range.
-    ///
-    /// If `x` and `y` are equal, the range is full, not empty.
-    pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
-        // For a (max) value of -1, max will be `-1 as usize`, which overflows.
-        // However, that is fine here (it would still represent the full range),
-        // i.e., if the range is everything.
-        let bits = self.value.size(cx).bits();
-        assert!(bits <= 128);
-        let mask = !0u128 >> (128 - bits);
-        let start = self.valid_range.start;
-        let end = self.valid_range.end;
-        assert_eq!(start, start & mask);
-        assert_eq!(end, end & mask);
-        start..(end.wrapping_add(1) & mask)
+    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
+    #[inline]
+    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
+        self.valid_range.is_full_for(self.value.size(cx))
     }
 }
@@ -960,7 +970,7 @@ impl AddressSpace {
 /// Describes how values of the type are passed by target ABIs,
 /// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
 pub enum Abi {
     Uninhabited,
     Scalar(Scalar),
@@ -988,8 +998,8 @@ impl Abi {
     /// Returns `true` if this is a single signed integer scalar
     #[inline]
     pub fn is_signed(&self) -> bool {
-        match *self {
-            Abi::Scalar(ref scal) => match scal.value {
+        match self {
+            Abi::Scalar(scal) => match scal.value {
                 Primitive::Int(_, signed) => signed,
                 _ => false,
             },
@@ -1058,7 +1068,7 @@ pub enum TagEncoding {
     },
 }
 
-#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
 pub struct Niche {
     pub offset: Size,
     pub scalar: Scalar,
@@ -1071,10 +1081,10 @@ impl Niche {
     }
 
     pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
-        let Scalar { value, valid_range: ref v } = self.scalar;
-        let bits = value.size(cx).bits();
-        assert!(bits <= 128);
-        let max_value = !0u128 >> (128 - bits);
+        let Scalar { value, valid_range: v } = self.scalar;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
 
         // Find out how many values are outside the valid range.
         let niche = v.end.wrapping_add(1)..v.start;
@@ -1084,10 +1094,10 @@ impl Niche {
     pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
         assert!(count > 0);
 
-        let Scalar { value, valid_range: v } = self.scalar.clone();
-        let bits = value.size(cx).bits();
-        assert!(bits <= 128);
-        let max_value = !0u128 >> (128 - bits);
+        let Scalar { value, valid_range: v } = self.scalar;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
 
         if count > max_value {
             return None;
@@ -1138,7 +1148,7 @@ pub struct Layout {
 
 impl Layout {
     pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
-        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
+        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
         let size = scalar.value.size(cx);
         let align = scalar.value.align(cx);
         Layout {
@@ -1264,25 +1274,22 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         Ty: TyAbiInterface<'a, C>,
         C: HasDataLayout,
     {
-        let scalar_allows_raw_init = move |s: &Scalar| -> bool {
+        let scalar_allows_raw_init = move |s: Scalar| -> bool {
             if zero {
                 // The range must contain 0.
-                s.valid_range.contains_zero()
+                s.valid_range.contains(0)
             } else {
-                // The range must include all values. `valid_range_exclusive` handles
-                // the wrap-around using target arithmetic; with wrap-around then the full
-                // range is one where `start == end`.
-                let range = s.valid_range_exclusive(cx);
-                range.start == range.end
+                // The range must include all values.
+                s.is_always_valid(cx)
             }
         };
 
         // Check the ABI.
-        let valid = match &self.abi {
+        let valid = match self.abi {
             Abi::Uninhabited => false, // definitely UB
             Abi::Scalar(s) => scalar_allows_raw_init(s),
             Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
-            Abi::Vector { element: s, count } => *count == 0 || scalar_allows_raw_init(s),
+            Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
             Abi::Aggregate { .. } => true, // Fields are checked below.
         };
         if !valid {
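
The `rustc_target/src/abi/mod.rs` hunks above are the core of the patch: `WrappingRange`, `Scalar`, `Abi`, and `Niche` become `Copy`, and the old exclusive-range fullness idiom (`range.start == range.end` after `valid_range_exclusive`) collapses into `WrappingRange::is_full_for` / `Scalar::is_always_valid`. The predicate: an inclusive, possibly wrapping range covers every value of a given width exactly when `end + 1`, wrapped at that width, lands back on `start`. A standalone sketch with a local stand-in type and the width passed as a bit count rather than rustc's `Size` — an illustration, not the compiler's code:

#[derive(Clone, Copy)]
struct WrappingRange {
    start: u128,
    end: u128,
}

impl WrappingRange {
    // Mirrors the new is_full_for: the range covers all `bits`-wide values
    // iff incrementing `end` (wrapping at the type's width) lands on `start`.
    fn is_full_for(self, bits: u32) -> bool {
        let max_value = u128::MAX >> (128 - bits);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}

fn main() {
    // 0..=255 is full for a u8 ...
    assert!(WrappingRange { start: 0, end: 255 }.is_full_for(8));
    // ... and so is any rotation of it, e.g. 10..=9 wrapping through 255.
    assert!(WrappingRange { start: 10, end: 9 }.is_full_for(8));
    // The bool range 0..=1 is not full for a u8.
    assert!(!WrappingRange { start: 0, end: 1 }.is_full_for(8));
}
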
diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs
index 57b8a84300ff9..0ecfda19141fa 100644
--- a/compiler/rustc_trait_selection/src/traits/object_safety.rs
+++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs
@@ -465,9 +465,9 @@ fn virtual_call_violation_for_method<'tcx>(
 
     let param_env = tcx.param_env(method.def_id);
 
-    let abi_of_ty = |ty: Ty<'tcx>| -> Option<&Abi> {
+    let abi_of_ty = |ty: Ty<'tcx>| -> Option<Abi> {
         match tcx.layout_of(param_env.and(ty)) {
-            Ok(layout) => Some(&layout.abi),
+            Ok(layout) => Some(layout.abi),
             Err(err) => {
                 // #78372
                 tcx.sess.delay_span_bug(