Commit 641e02f
Auto merge of rust-lang#88327 - bonega:scalar_refactor, r=eddyb
`WrappingRange` (rust-lang#88242) follow-up (`is_full_for`, `Scalar: Copy`, etc.)

Some changes addressing review feedback from rust-lang#88242.
r? `@RalfJung`
bors committed Sep 11, 2021
2 parents 4e880f8 + bc95994 · commit 641e02f
Showing 36 changed files with 230 additions and 270 deletions.
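
The diff has two themes. First, `rustc_target::abi::Scalar` is now `Copy`, so backends match `layout.abi` by value and the `&`-matches, `ref` bindings, `.clone()` calls, and `*count` derefs drop out. Second, scalar validity is carried around as the inclusive `WrappingRange` (queried via helpers like `is_full_for` / `is_always_valid`) instead of being converted to an exclusive `Range<u128>` up front. Below is a minimal model of the range type for orientation: the field names (`start`, `end`) and method names (`contains`, `is_full_for`) follow the diff, but plain bit widths instead of `Size` and the absence of a layout context are simplifications for illustration, not the compiler's actual signatures.

/// Inclusive range of valid values that may wrap around the value space of
/// its type, e.g. start: 1, end: u128::MAX encodes "any non-zero value".
#[derive(Clone, Copy, Debug)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    /// Whether `v` lies in the range, accounting for wraparound.
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Whether the range admits every value representable in `bits` bits,
    /// in which case emitting range metadata would say nothing at all.
    /// Judging from the call sites in this diff, `Scalar::is_always_valid`
    /// is essentially this check at the scalar's own size.
    pub fn is_full_for(&self, bits: u32) -> bool {
        let max = if bits >= 128 { u128::MAX } else { (1u128 << bits) - 1 };
        // Full iff the exclusive end wraps around onto the start.
        self.start == (self.end.wrapping_add(1) & max)
    }
}

fn main() {
    let non_null = WrappingRange { start: 1, end: u128::MAX };
    assert!(!non_null.contains(0)); // pointer niche: zero excluded

    let byte = WrappingRange { start: 0, end: 0xFF };
    assert!(byte.is_full_for(8)); // u8 with no niche: always valid

    let bool_range = WrappingRange { start: 0, end: 1 };
    assert!(!bool_range.is_full_for(8)); // as an i8 load, 0..=1 is a real constraint
    assert!(bool_range.is_full_for(1)); // as i1, it covers everything
}
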
20 changes: 10 additions & 10 deletions compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -92,9 +92,9 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
         match self.mode {
             PassMode::Ignore => smallvec![],
-            PassMode::Direct(attrs) => match &self.layout.abi {
+            PassMode::Direct(attrs) => match self.layout.abi {
                 Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
-                    AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
+                    AbiParam::new(scalar_to_clif_type(tcx, scalar)),
                     attrs
                 )],
                 Abi::Vector { .. } => {
@@ -103,10 +103,10 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
             },
-            PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi {
+            PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
                 Abi::ScalarPair(a, b) => {
-                    let a = scalar_to_clif_type(tcx, a.clone());
-                    let b = scalar_to_clif_type(tcx, b.clone());
+                    let a = scalar_to_clif_type(tcx, a);
+                    let b = scalar_to_clif_type(tcx, b);
                     smallvec![
                         apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
                         apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
@@ -139,20 +139,20 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
         match self.mode {
             PassMode::Ignore => (None, vec![]),
-            PassMode::Direct(_) => match &self.layout.abi {
+            PassMode::Direct(_) => match self.layout.abi {
                 Abi::Scalar(scalar) => {
-                    (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
+                    (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
                 }
                 Abi::Vector { .. } => {
                     let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
                     (None, vec![AbiParam::new(vector_ty)])
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
             },
-            PassMode::Pair(_, _) => match &self.layout.abi {
+            PassMode::Pair(_, _) => match self.layout.abi {
                 Abi::ScalarPair(a, b) => {
-                    let a = scalar_to_clif_type(tcx, a.clone());
-                    let b = scalar_to_clif_type(tcx, b.clone());
+                    let a = scalar_to_clif_type(tcx, a);
+                    let b = scalar_to_clif_type(tcx, b);
                     (None, vec![AbiParam::new(a), AbiParam::new(b)])
                 }
                 _ => unreachable!("{:?}", self.layout.abi),
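
These cranelift changes are representative of most of the commit: once `Scalar` (and the `Abi` enum built from it) can be copied, the matches move values out instead of borrowing and cloning. A standalone sketch of the before/after pattern, using stand-in types rather than the compiler's:

// Stand-in types; the real ones live in rustc_target::abi.
#[derive(Clone, Copy, Debug)]
struct Scalar {
    size_in_bytes: u64,
}

#[derive(Clone, Copy, Debug)]
enum Abi {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector { element: Scalar, count: u64 },
}

// Before Copy this took `&Abi` and wrote `Abi::Scalar(ref s) => use(s.clone())`;
// now the bindings are plain values, with no clones and no `*count` derefs.
fn describe(abi: Abi) -> String {
    match abi {
        Abi::Scalar(s) => format!("scalar, {} bytes", s.size_in_bytes),
        Abi::ScalarPair(a, b) => {
            format!("pair, {} + {} bytes", a.size_in_bytes, b.size_in_bytes)
        }
        Abi::Vector { element, count } => {
            format!("vector of {} x {} bytes", count, element.size_in_bytes)
        }
    }
}

fn main() {
    let abi = Abi::Vector { element: Scalar { size_in_bytes: 1 }, count: 8 };
    println!("{}", describe(abi));
    println!("{:?}", abi); // still usable: the call above only copied it
}
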
4 changes: 2 additions & 2 deletions compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -143,8 +143,8 @@ macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
 }
 
 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
-    let (element, count) = match &layout.abi {
-        Abi::Vector { element, count } => (element.clone(), *count),
+    let (element, count) = match layout.abi {
+        Abi::Vector { element, count } => (element, count),
         _ => unreachable!(),
     };
 
24 changes: 9 additions & 15 deletions compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -49,11 +49,7 @@ fn codegen_field<'tcx>(
     }
 }
 
-fn scalar_pair_calculate_b_offset(
-    tcx: TyCtxt<'_>,
-    a_scalar: &Scalar,
-    b_scalar: &Scalar,
-) -> Offset32 {
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
     let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
     Offset32::new(b_offset.bytes().try_into().unwrap())
 }
@@ -124,12 +120,10 @@ impl<'tcx> CValue<'tcx> {
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
                 let clif_ty = match layout.abi {
-                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
-                    Abi::Vector { ref element, count } => {
-                        scalar_to_clif_type(fx.tcx, element.clone())
-                            .by(u16::try_from(count).unwrap())
-                            .unwrap()
-                    }
+                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+                        .by(u16::try_from(count).unwrap())
+                        .unwrap(),
                     _ => unreachable!("{:?}", layout.ty),
                 };
                 let mut flags = MemFlags::new();
@@ -147,13 +141,13 @@ impl<'tcx> CValue<'tcx> {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let (a_scalar, b_scalar) = match &layout.abi {
+                let (a_scalar, b_scalar) = match layout.abi {
                     Abi::ScalarPair(a, b) => (a, b),
                     _ => unreachable!("load_scalar_pair({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
-                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                 let mut flags = MemFlags::new();
                 flags.set_notrap();
                 let val1 = ptr.load(fx, clif_ty1, flags);
@@ -564,7 +558,7 @@ impl<'tcx> CPlace<'tcx> {
                 to_ptr.store(fx, val, flags);
                 return;
             }
-            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+            Abi::ScalarPair(a_scalar, b_scalar) => {
                 let (value, extra) = from.load_scalar_pair(fx);
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                 to_ptr.store(fx, value, flags);
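
`scalar_pair_calculate_b_offset`, now taking its scalars by value, places component B of a `ScalarPair` at A's size rounded up to B's ABI alignment. A self-contained check of that round-up arithmetic, with a toy `align_to` assuming power-of-two alignments like the real one:

fn align_to(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    // Round `size` up to the next multiple of `align`.
    (size + align - 1) & !(align - 1)
}

fn main() {
    // A pair like (u8, u32): A occupies 1 byte, B needs 4-byte alignment,
    // so B sits at offset 4, leaving 3 bytes of padding.
    assert_eq!(align_to(1, 4), 4);
    // (u32, u32): no padding, B starts right after A.
    assert_eq!(align_to(4, 4), 4);
    // (u64, u8): B is 1-aligned, so its offset is just A's size.
    assert_eq!(align_to(8, 1), 8);
}
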
9 changes: 3 additions & 6 deletions compiler/rustc_codegen_llvm/src/abi.rs
@@ -536,16 +536,13 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             }
             _ => {}
         }
-        if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
+        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
             if let Int(..) = scalar.value {
-                if !scalar.is_bool() {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(callsite, range);
-                    }
+                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
+                    bx.range_metadata(callsite, scalar.valid_range);
                 }
             }
         }
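
The old code converted to an exclusive range and tested `start != end` to detect "nothing worth saying"; `is_always_valid` now expresses that directly on the inclusive range. Booleans remain explicitly excluded for the reason the comment in the diff gives: once the value is truncated to i1, its range 0..=1 covers everything, and the old exclusive encoding 0..2 would wrap to the degenerate pair 0..0, which the LLVM verifier rejects. A self-contained check of that wraparound, using a toy helper rather than the compiler's:

fn exclusive_end_at_width(end_inclusive: u128, bits: u32) -> u128 {
    let mask = if bits >= 128 { u128::MAX } else { (1u128 << bits) - 1 };
    end_inclusive.wrapping_add(1) & mask
}

fn main() {
    // bool as i8: 0..=1 becomes the exclusive 0..2, a real constraint.
    assert_eq!(exclusive_end_at_width(1, 8), 2);
    // bool as i1: the exclusive end wraps to 0, i.e. the illegal empty
    // range 0..0 -- hence the `!scalar.is_bool()` guard stays.
    assert_eq!(exclusive_end_at_width(1, 1), 0);
}
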
14 changes: 7 additions & 7 deletions compiler/rustc_codegen_llvm/src/asm.rs
@@ -792,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
 
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
-fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
+fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
     match scalar.value {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -812,7 +812,7 @@ fn llvm_fixup_input(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
@@ -835,7 +835,7 @@ fn llvm_fixup_input(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count);
+            let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
@@ -890,7 +890,7 @@ fn llvm_fixup_output(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 bx.extract_element(value, bx.const_i32(0))
@@ -910,8 +910,8 @@ fn llvm_fixup_output(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
-            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
+            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
@@ -965,7 +965,7 @@ fn llvm_fixup_output_type(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Type {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 cx.type_vector(cx.type_i8(), 8)
25 changes: 12 additions & 13 deletions compiler/rustc_codegen_llvm/src/builder.rs
@@ -18,12 +18,12 @@ use rustc_hir::def_id::DefId;
 use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
-use rustc_target::abi::{self, Align, Size};
+use rustc_target::abi::{self, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, Target};
 use std::borrow::Cow;
 use std::ffi::CStr;
 use std::iter;
-use std::ops::{Deref, Range};
+use std::ops::Deref;
 use std::ptr;
 use tracing::debug;
 
@@ -382,7 +382,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             val
         }
     }
-    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
             return self.trunc(val, self.cx().type_i1());
         }
@@ -460,16 +460,15 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         fn scalar_load_metadata<'a, 'll, 'tcx>(
             bx: &mut Builder<'a, 'll, 'tcx>,
             load: &'ll Value,
-            scalar: &abi::Scalar,
+            scalar: abi::Scalar,
         ) {
             match scalar.value {
                 abi::Int(..) => {
-                    let range = scalar.valid_range_exclusive(bx);
-                    if range.start != range.end {
-                        bx.range_metadata(load, range);
+                    if !scalar.is_always_valid(bx) {
+                        bx.range_metadata(load, scalar.valid_range);
                     }
                 }
-                abi::Pointer if !scalar.valid_range.contains_zero() => {
+                abi::Pointer if !scalar.valid_range.contains(0) => {
                     bx.nonnull_metadata(load);
                 }
                 _ => {}
@@ -489,17 +488,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
-                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
             let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);
 
-            let mut load = |i, scalar: &abi::Scalar, align| {
+            let mut load = |i, scalar: abi::Scalar, align| {
                 let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -555,7 +554,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         next_bx
     }
 
-    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
+    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
         if self.sess().target.arch == "amdgpu" {
             // amdgpu/LLVM does something weird and thinks an i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
@@ -568,7 +567,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let llty = self.cx.val_ty(load);
         let v = [
            self.cx.const_uint_big(llty, range.start),
-           self.cx.const_uint_big(llty, range.end),
+           self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
        ];
 
        llvm::LLVMSetMetadata(
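
`range_metadata` now receives the inclusive `WrappingRange` and converts to LLVM's half-open `!range` pair at the last moment: the upper operand is `end + 1`, computed with `wrapping_add` so that a range like `1..=u64::MAX` becomes the wrapped pair `[1, 0)`, which `!range` metadata permits. A small sketch of just that encoding step, with a stand-in function in place of the `const_uint_big` / `LLVMSetMetadata` calls:

fn range_operands(start: u128, end_inclusive: u128, bits: u32) -> (u128, u128) {
    let mask = if bits >= 128 { u128::MAX } else { (1u128 << bits) - 1 };
    // LLVM !range takes [lo, hi) and allows hi < lo to express wraparound.
    (start & mask, end_inclusive.wrapping_add(1) & mask)
}

fn main() {
    // A NonZeroU64-style niche 1..=u64::MAX wraps to [1, 0) at i64 width.
    assert_eq!(range_operands(1, u64::MAX as u128, 64), (1, 0));
    // An enum tag valid for 0..=2 becomes the ordinary half-open [0, 3).
    assert_eq!(range_operands(0, 2, 64), (0, 3));
}
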
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/common.rs
@@ -228,7 +228,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         })
     }
 
-    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/consts.rs
@@ -111,7 +111,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
                     Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                     &cx.tcx,
                 ),
-                &Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+                Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
                 cx.type_i8p_ext(address_space),
             ));
             next_offset = offset + pointer_size;
18 changes: 7 additions & 11 deletions compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -1656,7 +1656,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
             Variants::Multiple {
                 tag_encoding:
                     TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
-                ref tag,
+                tag,
                 ref variants,
                 tag_field,
             } => {
@@ -2082,19 +2082,17 @@ fn prepare_enum_metadata(
 
     let layout = cx.layout_of(enum_type);
 
-    if let (
-        &Abi::Scalar(_),
-        &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
-    ) = (&layout.abi, &layout.variants)
+    if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
+        (layout.abi, &layout.variants)
     {
         return FinalMetadata(discriminant_type_metadata(tag.value));
     }
 
     if use_enum_fallback(cx) {
         let discriminant_type_metadata = match layout.variants {
             Variants::Single { .. } => None,
-            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. }
-            | Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => {
+            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
+            | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
                 Some(discriminant_type_metadata(tag.value))
            }
        };
@@ -2146,9 +2144,7 @@ fn prepare_enum_metadata(
         // A single-variant enum has no discriminant.
         Variants::Single { .. } => None,
 
-        Variants::Multiple {
-            tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
-        } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
             // Find the integer type of the correct size.
             let size = tag.value.size(cx);
             let align = tag.value.align(cx);
@@ -2179,7 +2175,7 @@ fn prepare_enum_metadata(
             }
         }
 
-        Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
             let discr_type = tag.value.to_ty(cx.tcx);
             let (size, align) = cx.size_and_align_of(discr_type);
 
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -133,7 +133,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(ref scalar) => {
+                    abi::Abi::Scalar(scalar) => {
                         match scalar.value {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
