From 4ffff8f00a52ac10110d018b8521b8c50fbda444 Mon Sep 17 00:00:00 2001 From: Richard Diamond Date: Wed, 12 Sep 2018 11:17:42 -0500 Subject: [PATCH 1/3] Make `librustc_codegen_llvm` aware of LLVM address spaces. To avoid having to overload functions based on their arguments' address spaces (among other things), we require the presence of a "flat" address space, i.e. an address space which is shared with every other address space. This isn't exposed to Rust code in any way; it just makes Rust compatible with LLVM target machines which, for example, place allocas in a non-default address space. `amdgcn-amd-amdhsa-amdgiz` is a specific example: it places allocas in address space 5, the private (work-item-level) address space. --- src/librustc_codegen_llvm/abi.rs | 10 +- src/librustc_codegen_llvm/builder.rs | 78 +++++++++- src/librustc_codegen_llvm/common.rs | 39 ++++- src/librustc_codegen_llvm/consts.rs | 68 ++++++-- src/librustc_codegen_llvm/context.rs | 143 +++++++++++++++-- src/librustc_codegen_llvm/debuginfo/gdb.rs | 4 +- src/librustc_codegen_llvm/declare.rs | 17 +- src/librustc_codegen_llvm/intrinsic.rs | 8 +- src/librustc_codegen_llvm/llvm/ffi.rs | 13 +- src/librustc_codegen_llvm/mono_item.rs | 8 +- src/librustc_codegen_llvm/type_.rs | 52 ++++++- src/librustc_codegen_llvm/type_of.rs | 10 +- src/librustc_codegen_ssa/mir/block.rs | 5 + src/librustc_codegen_ssa/mir/place.rs | 12 ++ src/librustc_codegen_ssa/mir/rvalue.rs | 36 ++++- src/librustc_codegen_ssa/traits/builder.rs | 13 ++ src/librustc_codegen_ssa/traits/consts.rs | 10 +- src/librustc_codegen_ssa/traits/declare.rs | 10 +- src/librustc_codegen_ssa/traits/misc.rs | 7 + src/librustc_codegen_ssa/traits/mod.rs | 14 ++ src/librustc_codegen_ssa/traits/type_.rs | 77 ++++++++- src/librustc_target/abi/mod.rs | 155 ++++++++++++++++-- src/librustc_target/spec/mod.rs | 173 ++++++++++++++++++++- src/rustllvm/RustWrapper.cpp | 26 +++- 24 files changed, 897 insertions(+), 91 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 258d839d32e82..5ca295733b2e5 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -649,10 +649,11 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => { self.ret.layout.immediate_llvm_type(cx) + .copy_addr_space(cx.flat_addr_space()) } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..) => { - llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + llargument_tys.push(cx.type_ptr_to_flat(self.ret.memory_ty(cx))); cx.type_void() } }; @@ -665,8 +666,11 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { let llarg_ty = match arg.mode { PassMode::Ignore => continue, - PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx), + PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx) + .copy_addr_space(cx.flat_addr_space()), PassMode::Pair(..) => { + // Keep the argument type address space given by + // `scalar_pair_element_llvm_type`.
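+ // E.g. a `&[u8]` argument lowers to a (pointer, length) pair here, + // and the pointer half keeps whatever address space + // `scalar_pair_element_llvm_type` chose, instead of being forced flat.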
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true)); llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true)); continue; @@ -679,7 +683,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)), + PassMode::Indirect(_, None) => cx.type_ptr_to_flat(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index b79d0da0bcd06..6f6a75e18fada 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -2,7 +2,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{self, False, BasicBlock}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; use rustc_codegen_ssa::{self, MemFlags}; -use common::Funclet; +use common::{Funclet, val_addr_space, val_addr_space_opt}; use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; @@ -18,6 +18,7 @@ use syntax; use rustc_codegen_ssa::base::to_immediate; use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_target::spec::AddrSpaceIdx; use std::borrow::Cow; use std::ffi::CStr; use std::ops::{Deref, Range}; @@ -846,6 +847,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); + let val = self.flat_addr_cast(val); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } @@ -853,6 +855,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); + let dest_ty = dest_ty.copy_addr_space(self.cx().flat_addr_space()); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } @@ -860,12 +863,43 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); + let dest_ty = dest_ty.copy_addr_space(val_addr_space(val)); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) } } + /// address space casts, then bitcasts to dest_ty without changing address spaces. + fn as_ptr_cast(&mut self, val: &'ll Value, + addr_space: AddrSpaceIdx, + dest_ty: &'ll Type) -> &'ll Value + { + let val = self.addrspace_cast(val, addr_space); + self.pointercast(val, dest_ty.copy_addr_space(addr_space)) + } + fn flat_as_ptr_cast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.as_ptr_cast(val, self.cx().flat_addr_space(), dest_ty) + } + fn addrspace_cast(&mut self, val: &'ll Value, dest: AddrSpaceIdx) -> &'ll Value { + // LLVM considers no-op address space casts to be invalid. 
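+ // (The LLVM verifier rejects an `addrspacecast` whose source and + // destination spaces are the same, e.g. casting `i8 addrspace(5)*` + // to `i8 addrspace(5)*`, hence the `!=` guard below.)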
+ let src_ty = self.cx.val_ty(val); + if src_ty.is_ptr() && src_ty.address_space() != dest { + let dest_ty = src_ty.copy_addr_space(dest); + self.cx().check_addr_space_cast(val, dest_ty); + self.count_insn("addrspacecast"); + unsafe { + llvm::LLVMBuildAddrSpaceCast(self.llbuilder, val, + dest_ty, noname()) + } + } else { + val + } + } + + fn flat_addr_cast(&mut self, val: &'ll Value) -> &'ll Value { + self.addrspace_cast(val, self.cx().flat_addr_space()) + } fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { self.count_insn("intcast"); unsafe { @@ -875,6 +909,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("pointercast"); + let dest_ty = dest_ty.copy_addr_space(val_addr_space(val)); unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) } @@ -883,7 +918,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { /* Comparisons */ fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); + let op = llvm::IntPredicate::from_generic(op); + + match (val_addr_space_opt(lhs), val_addr_space_opt(rhs)) { + (Some(l), Some(r)) if l == r => {}, + (Some(l), Some(r)) if l != r => { + bug!("tried to cmp ptrs of different addr spaces: lhs {:?} rhs {:?}", + lhs, rhs); + }, + _ => {}, + } + unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } @@ -1004,7 +1050,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) { let ptr_width = &self.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let addr_space = self.val_ty(ptr).address_space(); + let intrinsic_key = format!("llvm.memset.p{}i8.i{}", addr_space, ptr_width); let llintrinsicfn = self.get_intrinsic(&intrinsic_key); let ptr = self.pointercast(ptr, self.type_i8p()); let align = self.const_u32(align.bytes() as u32); @@ -1352,7 +1399,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); + let stored_ptr_ty = self.cx.type_as_ptr_to(stored_ty, + dest_ptr_ty.address_space()); assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); @@ -1398,7 +1446,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { debug!("Type mismatch in function call of {:?}. \ Expected {:?} for param {}, got {:?}; injecting bitcast", llfn, expected_ty, i, actual_ty); - self.bitcast(actual_val, expected_ty) + if expected_ty.is_ptr() && actual_ty.is_ptr() { + let actual_val = self.addrspace_cast(actual_val, + expected_ty.address_space()); + self.pointercast(actual_val, expected_ty) + } else { + let actual_val = if actual_ty.is_ptr() { + self.flat_addr_cast(actual_val) + } else { + actual_val + }; + self.bitcast(actual_val, expected_ty) + } } else { actual_val } @@ -1488,7 +1547,16 @@ impl Builder<'a, 'll, 'tcx> { return; } - let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); + let addr_space = self.cx.val_ty(ptr).address_space(); + // Old LLVMs don't have the address space specific intrinsics. + // So as a semi-crude workaround, don't specialize if in the + // default address space. 
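+ // E.g. a pointer in addrspace(5) uses `llvm.lifetime.start.p5i8`, + // while addrspace(0) keeps the unsuffixed `llvm.lifetime.start` name + // that older LLVMs expect.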
+ let lifetime_intrinsic = if let AddrSpaceIdx(0) = addr_space { + self.cx.get_intrinsic(intrinsic) + } else { + let intrinsic = format!("{}.p{}i8", intrinsic, addr_space); + self.cx.get_intrinsic(&intrinsic) + }; let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 675d6ccb5041d..a5aba2dea1de3 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -13,7 +13,9 @@ use rustc_codegen_ssa::traits::*; use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; use rustc::mir::interpret::{Scalar, AllocKind, Allocation}; use consts::const_alloc_to_llvm; +use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_target::spec::AddrSpaceIdx; use libc::{c_uint, c_char}; @@ -170,9 +172,9 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { s.len() as c_uint, !null_terminated as Bool); let sym = self.generate_local_symbol_name("str"); - let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", sym); - }); + let addr_space = self.const_addr_space(); + let g = self.define_global(&sym[..], self.val_ty(sc), addr_space) + .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym) ); llvm::LLVMSetInitializer(g, sc); llvm::LLVMSetGlobalConstant(g, True); llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); @@ -284,6 +286,10 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } + fn const_as_cast(&self, val: &'ll Value, addr_space: AddrSpaceIdx) -> &'ll Value { + self.const_addrcast(val, addr_space) + } + fn scalar_to_backend( &self, cv: Scalar, @@ -299,10 +305,16 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { Scalar::Bits { bits, size } => { assert_eq!(size as u64, layout.value.size(self).bytes()); let llval = self.const_uint_big(self.type_ix(bitsize), bits); - if layout.value == layout::Pointer { - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + let flat_llty = llty.copy_addr_space(self.flat_addr_space()); + let llval = if layout.value == layout::Pointer { + unsafe { llvm::LLVMConstIntToPtr(llval, flat_llty) } } else { - self.const_bitcast(llval, llty) + self.const_bitcast(llval, flat_llty) + }; + if llty.is_ptr() { + self.const_as_cast(llval, llty.address_space()) + } else { + llval } }, Scalar::Ptr(ptr) => { @@ -311,7 +323,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { Some(AllocKind::Memory(alloc)) => { let init = const_alloc_to_llvm(self, alloc); if alloc.mutability == Mutability::Mutable { - self.static_addr_of_mut(init, alloc.align, None) + self.static_addr_of_mut(init, alloc.align, None, + self.mutable_addr_space()) } else { self.static_addr_of(init, alloc.align, None) } @@ -330,6 +343,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { &self.const_usize(ptr.offset.bytes()), 1, ) }; + let llval = self.const_flat_as_cast(llval); if layout.value != layout::Pointer { unsafe { llvm::LLVMConstPtrToInt(llval, llty) } } else { @@ -367,6 +381,17 @@ pub fn val_ty(v: &'ll Value) -> &'ll Type { llvm::LLVMTypeOf(v) } } +pub fn val_addr_space_opt(v: &'ll Value) -> Option<AddrSpaceIdx> { + let ty = val_ty(v); + if ty.kind() == TypeKind::Pointer { + Some(ty.address_space()) + } else { + None + } +} +pub fn val_addr_space(v: &'ll Value) -> AddrSpaceIdx { + val_addr_space_opt(v).unwrap_or_default() +} pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe {
diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index b7a9382c338bd..703211aa924fb 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -6,7 +6,7 @@ use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, use rustc::hir::Node; use debuginfo; use monomorphize::MonoItem; -use common::CodegenCx; +use common::{CodegenCx, val_addr_space, val_addr_space_opt}; use monomorphize::Instance; use syntax_pos::Span; use rustc_target::abi::HasDataLayout; @@ -17,6 +17,7 @@ use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; use rustc_codegen_ssa::traits::*; +use rustc_target::spec::AddrSpaceIdx; use rustc::ty::layout::{self, Size, Align, LayoutOf}; @@ -123,7 +124,7 @@ fn check_and_apply_linkage( }; unsafe { // Declare a symbol `foo` with the desired linkage. - let g1 = cx.declare_global(&sym, llty2); + let g1 = cx.declare_global(&sym, llty2, cx.flat_addr_space()); llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -134,7 +135,8 @@ fn check_and_apply_linkage( // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{ + let g2 = cx.define_global(&real_name, llty, cx.flat_addr_space()) + .unwrap_or_else(||{ if let Some(span) = span { cx.sess().span_fatal( span, @@ -151,11 +153,13 @@ fn check_and_apply_linkage( } else { // Generate an external declaration. // FIXME(nagisa): investigate whether it can be changed into define_global - cx.declare_global(&sym, llty) + cx.declare_global(&sym, llty, cx.flat_addr_space()) } } +/// Won't change address spaces pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { + let ty = ty.copy_addr_space(val_addr_space(val)); unsafe { llvm::LLVMConstPointerCast(val, ty) } @@ -163,29 +167,48 @@ pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { impl CodegenCx<'ll, 'tcx> { crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + let ty = if let Some(addr_space) = val_addr_space_opt(val) { + ty.copy_addr_space(addr_space) + } else { + ty + }; unsafe { llvm::LLVMConstBitCast(val, ty) } } + crate fn const_addrcast(&self, val: &'ll Value, addr_space: AddrSpaceIdx) -> &'ll Value { + let src_ty = self.val_ty(val); + if src_ty.is_ptr() && src_ty.address_space() != addr_space { + let dest_ty = src_ty.copy_addr_space(addr_space); + self.check_addr_space_cast(val, dest_ty); + unsafe { + llvm::LLVMConstAddrSpaceCast(val, dest_ty) + } + } else { + val + } + } + crate fn static_addr_of_mut( &self, cv: &'ll Value, align: Align, kind: Option<&str>, + addr_space: AddrSpaceIdx, ) -> &'ll Value { unsafe { let gv = match kind { Some(kind) if !self.tcx.sess.fewer_names() => { let name = self.generate_local_symbol_name(kind); let gv = self.define_global(&name[..], - self.val_ty(cv)).unwrap_or_else(||{ + self.val_ty(cv), addr_space).unwrap_or_else(||{ bug!("symbol `{}` is already defined", name); }); llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); gv }, - _ => self.define_private_global(self.val_ty(cv)), + _ => self.define_private_global(self.val_ty(cv), addr_space), }; llvm::LLVMSetInitializer(gv, cv); set_global_alignment(&self, gv, align); @@ -218,13 +241,18 @@ impl CodegenCx<'ll, 'tcx> { let llty = self.layout_of(ty).llvm_type(self); let (g, attrs) = match self.tcx.hir().get(id) { Node::Item(&hir::Item { - ref attrs, span, node: hir::ItemKind::Static(..), 
.. + ref attrs, span, node: hir::ItemKind::Static(_, m, _), .. }) => { if self.get_declared_value(&sym[..]).is_some() { span_bug!(span, "Conflicting symbol names for static?"); } + let addr_space = if m == hir::MutMutable || !self.type_is_freeze(ty) { + self.mutable_addr_space() + } else { + self.const_addr_space() + }; - let g = self.define_global(&sym[..], llty).unwrap(); + let g = self.define_global(&sym[..], llty, addr_space).unwrap(); if !self.tcx.is_reachable_non_generic(def_id) { unsafe { @@ -334,7 +362,8 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { } return gv; } - let gv = self.static_addr_of_mut(cv, align, kind); + let gv = self.static_addr_of_mut(cv, align, kind, + self.const_addr_space()); unsafe { llvm::LLVMSetGlobalConstant(gv, True); } @@ -370,6 +399,11 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. + let llvm_mutable = is_mutable || !self.type_is_freeze(ty); + let llty = self.layout_of(ty).llvm_type(self); let g = if val_llty == llty { g @@ -384,8 +418,15 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { let linkage = llvm::LLVMRustGetLinkage(g); let visibility = llvm::LLVMRustGetVisibility(g); + let addr_space = if llvm_mutable { + self.mutable_addr_space() + } else { + self.const_addr_space() + }; + let new_g = llvm::LLVMRustGetOrInsertGlobal( - self.llmod, name_string.as_ptr(), val_llty); + self.llmod, name_string.as_ptr(), val_llty, + addr_space.0); llvm::LLVMRustSetLinkage(new_g, linkage); llvm::LLVMRustSetVisibility(new_g, visibility); @@ -401,10 +442,8 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { // As an optimization, all shared statics which do not have interior // mutability are placed into read-only memory. - if !is_mutable { - if self.type_is_freeze(ty) { - llvm::LLVMSetGlobalConstant(g, llvm::True); - } + if !llvm_mutable { + llvm::LLVMSetGlobalConstant(g, llvm::True); } debuginfo::create_global_var_metadata(&self, def_id, g); @@ -480,6 +519,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { if attrs.flags.contains(CodegenFnAttrFlags::USED) { // This static will be stored in the llvm.used variable which is an array of i8* + // Note this ignores the address space of `g`, but that's okay here. 
let cast = llvm::LLVMConstPointerCast(g, self.type_i8p()); self.used_statics.borrow_mut().push(cast); } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 2b03e99161db8..de06a6f5c038a 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -7,7 +7,7 @@ use monomorphize::Instance; use value::Value; use monomorphize::partitioning::CodegenUnit; -use type_::Type; +use type_::{Type, AddrSpaceIdx, AddrSpaceKind, }; use type_of::PointeeInfo; use rustc_codegen_ssa::traits::*; use libc::c_uint; @@ -20,7 +20,8 @@ use rustc::session::Session; use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout, VariantIdx}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; -use rustc_target::spec::{HasTargetSpec, Target}; +use rustc_target::abi::HasDataLayout; +use rustc_target::spec::{HasTargetSpec, Target, AddrSpaceProps, }; use rustc_codegen_ssa::callee::resolve_and_get_fn; use rustc_codegen_ssa::base::wants_msvc_seh; use callee::get_fn; @@ -32,6 +33,7 @@ use std::str; use std::sync::Arc; use syntax::symbol::LocalInternedString; use abi::Abi; +use std::u32; /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. @@ -83,13 +85,18 @@ pub struct CodegenCx<'ll, 'tcx: 'll> { pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>, pub isize_ty: &'ll Type, + alloca_addr_space: AddrSpaceIdx, + const_addr_space: AddrSpaceIdx, + mutable_addr_space: AddrSpaceIdx, + flat_addr_space: AddrSpaceIdx, + pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>, eh_personality: Cell<Option<&'ll Value>>, eh_unwind_resume: Cell<Option<&'ll Value>>, pub rust_try_fn: Cell<Option<&'ll Value>>, - intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>, + intrinsics: RefCell<FxHashMap<String, &'ll Value>>, /// A counter that is used for generating local symbol names local_gen_sym_counter: Cell<usize>, @@ -276,6 +283,23 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits()); + let alloca_addr_space = tcx.data_layout().alloca_address_space; + let mutable_addr_space = + tcx.sess.target.target.options.addr_spaces + .get(&AddrSpaceKind::ReadWrite) + .map(|v| v.index ) + .unwrap_or_default(); + let const_addr_space = + tcx.sess.target.target.options.addr_spaces + .get(&AddrSpaceKind::ReadOnly) + .map(|v| v.index ) + .unwrap_or(mutable_addr_space); + let flat_addr_space = + tcx.sess.target.target.options.addr_spaces + .get(&AddrSpaceKind::Flat) + .map(|v| v.index ) + .unwrap_or_default(); + CodegenCx { tcx, check_overflow, @@ -296,6 +320,12 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { scalar_lltypes: Default::default(), pointee_infos: Default::default(), isize_ty, + + alloca_addr_space, + const_addr_space, + mutable_addr_space, + flat_addr_space, + dbg_cx, eh_personality: Cell::new(None), eh_unwind_resume: Cell::new(None), @@ -453,6 +483,32 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { llvm::LLVMSetSection(g, section.as_ptr()); } } + fn can_cast_addr_space(&self, from: AddrSpaceIdx, to: AddrSpaceIdx) -> bool { + if from == to { return true; } + + let bug = || { + bug!("no address space kind for {}", from); + }; + + let (to_kind, _) = self.addr_space_props_from_idx(to) + .unwrap_or_else(&bug); + let (_, from_props) = self.addr_space_props_from_idx(from) + .unwrap_or_else(&bug); + + from_props.shared_with.contains(&to_kind) + } + fn alloca_addr_space(&self) -> AddrSpaceIdx { + self.alloca_addr_space + } + fn const_addr_space(&self) -> AddrSpaceIdx { + self.const_addr_space + } + fn mutable_addr_space(&self) -> AddrSpaceIdx { +
self.mutable_addr_space + } + fn flat_addr_space(&self) -> AddrSpaceIdx { + self.flat_addr_space + } } impl CodegenCx<'b, 'tcx> { @@ -473,7 +529,7 @@ impl CodegenCx<'b, 'tcx> { if key == $name { let f = self.declare_cfn($name, self.type_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); - self.intrinsics.borrow_mut().insert($name, f.clone()); + self.intrinsics.borrow_mut().insert($name.to_string(), f.clone()); return Some(f); } ); @@ -481,7 +537,7 @@ impl CodegenCx<'b, 'tcx> { if key == $name { let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret)); llvm::SetUnnamedAddr(f, false); - self.intrinsics.borrow_mut().insert($name, f.clone()); + self.intrinsics.borrow_mut().insert($name.to_string(), f.clone()); return Some(f); } ); @@ -489,7 +545,7 @@ impl CodegenCx<'b, 'tcx> { if key == $name { let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret)); llvm::SetUnnamedAddr(f, false); - self.intrinsics.borrow_mut().insert($name, f.clone()); + self.intrinsics.borrow_mut().insert($name.to_string(), f.clone()); return Some(f); } ); @@ -518,9 +574,53 @@ impl CodegenCx<'b, 'tcx> { let t_v4f64 = self.type_vector(t_f64, 4); let t_v8f64 = self.type_vector(t_f64, 8); - ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + fn parse_addr_space(s: &str) -> AddrSpaceIdx { + assert!(s.starts_with("p")); + assert!(s.ends_with("i8")); + let s = &s[1..]; + let s = &s[..s.len() - 2]; + AddrSpaceIdx(u32::from_str_radix(s, 10).unwrap()) + } + + if key.starts_with("llvm.memcpy") || key.starts_with("llvm.memmove") || + key.starts_with("llvm.memset") { + + let mut split = key.split('.'); + assert_eq!(Some("llvm"), split.next()); + let flavor = split.next(); + let flavor = flavor.unwrap(); + + let dst_ptr_str = split.next(); + assert!(dst_ptr_str.is_some()); + let dst_ptr_str = dst_ptr_str.unwrap(); + let dst_asp = parse_addr_space(dst_ptr_str); + let dst_ty = self.type_i8p_as(dst_asp); + + let src_ty = if flavor != "memset" { + let src_ptr_str = split.next(); + assert!(src_ptr_str.is_some()); + let src_ptr_str = src_ptr_str.unwrap(); + let src_asp = parse_addr_space(src_ptr_str); + self.type_i8p_as(src_asp) + } else { + t_i8 + }; + + let len_ty = match split.next() { + Some("i16") => t_i16, + Some("i32") => t_i32, + Some("i64") => t_i64, + Some("i128") => t_i128, + l => { + bug!("unknown llvm.{} intrinsic sig (len ty): {}, {:?}", flavor, key, l); + }, + }; + let fty = self.type_func(&[dst_ty, src_ty, len_ty, t_i32, i1], &void); + let f = self.declare_cfn(key, fty); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert(key.to_string(), f.clone()); + return Some(f); + } ifn!("llvm.trap", fn() -> void); ifn!("llvm.debugtrap", fn() -> void); @@ -759,6 +859,24 @@ impl CodegenCx<'b, 'tcx> { ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + if key.starts_with("llvm.lifetime") { + let mut split = key.split('.'); + split.next(); split.next(); + + let _variant = split.next(); + + let addr_space = match split.next() { + Some(addr_space) => parse_addr_space(addr_space), + None => unreachable!(), + }; + + let fty = self.type_func(&[t_i64, self.type_i8p_as(addr_space)], &void); + let f = self.declare_cfn(key, fty); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert(key.to_string(), f.clone()); + return Some(f); + } + ifn!("llvm.expect.i1", fn(i1, i1) -> 
i1); ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); ifn!("llvm.localescape", fn(...) -> void); @@ -795,6 +913,13 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> { base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); name } + + pub fn addr_space_props_from_idx(&self, idx: AddrSpaceIdx) + -> Option<(&AddrSpaceKind, &AddrSpaceProps)> + { + self.tcx.sess.target.target.options.addr_spaces.iter() + .find(|&(_, ref props)| props.index == idx ) + } } impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> { diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index c883d6030951d..d9b1c3deca94c 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -46,9 +46,9 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) unsafe { let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64); - + let addr_space = cx.flat_addr_space(); let section_var = cx.define_global(section_var_name, - llvm_type).unwrap_or_else(||{ + llvm_type, addr_space).unwrap_or_else(||{ bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index aa2a0016b3e7a..8489e7a98135b 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -17,6 +17,7 @@ use rustc::ty::{self, PolyFnSig}; use rustc::ty::layout::LayoutOf; use rustc::session::config::Sanitizer; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_target::spec::AddrSpaceIdx; use abi::{FnType, FnTypeExt}; use attributes; use context::CodegenCx; @@ -85,12 +86,15 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn declare_global( &self, - name: &str, ty: &'ll Type + name: &str, + ty: &'ll Type, + addr_space: AddrSpaceIdx, ) -> &'ll Value { debug!("declare_global(name={:?})", name); let namebuf = SmallCStr::new(name); unsafe { - llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty) + llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty, + addr_space.0) } } @@ -126,18 +130,19 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn define_global( &self, name: &str, - ty: &'ll Type + ty: &'ll Type, + addr_space: AddrSpaceIdx ) -> Option<&'ll Value> { if self.get_defined_value(name).is_some() { None } else { - Some(self.declare_global(name, ty)) + Some(self.declare_global(name, ty, addr_space)) } } - fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { + fn define_private_global(&self, ty: &'ll Type, addr_space: AddrSpaceIdx) -> &'ll Value { unsafe { - llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) + llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty, addr_space.0) } } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index eeb6a64164e9c..07fcd7db9e56f 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -739,7 +739,9 @@ fn try_intrinsic( if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align.abi; - bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align); + let addr_space = bx.type_addr_space(bx.val_ty(dest)).unwrap(); + bx.store(bx.const_null(bx.type_i8p_as(addr_space)), + dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, func, data, local_ptr, dest); } else { @@ -903,10 +905,10 @@ fn codegen_gnu_try( // rust_try 
ignores the selector. let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1); - catch.add_clause(vals, bx.const_null(bx.type_i8p())); + catch.add_clause(vals, bx.const_null(bx.type_flat_i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p())); + let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_flat_i8p())); catch.store(ptr, bitcast, ptr_align); catch.ret(bx.const_i32(1)); }); diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 11e34f600c286..3eb389d1cf413 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -656,6 +656,7 @@ extern "C" { pub fn LLVMGetElementType(Ty: &Type) -> &Type; pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint; + pub fn LLVMGetPointerAddressSpace(Ty: &Type) -> c_uint; // Operations on other types pub fn LLVMVoidTypeInContext(C: &Context) -> &Type; @@ -716,6 +717,7 @@ extern "C" { pub fn LLVMConstIntToPtr(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub fn LLVMConstBitCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub fn LLVMConstPointerCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; + pub fn LLVMConstAddrSpaceCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub fn LLVMConstExtractValue(AggConstant: &Value, IdxList: *const c_uint, NumIdx: c_uint) @@ -737,8 +739,10 @@ extern "C" { pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; pub fn LLVMAddGlobal(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value; pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>; - pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type) -> &'a Value; - pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type) -> &'a Value; + pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type, + AS: c_uint) -> &'a Value; + pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type, + AS: c_uint) -> &'a Value; pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>; pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>; pub fn LLVMDeleteGlobal(GlobalVar: &Value); @@ -1083,6 +1087,11 @@ extern "C" { DestTy: &'a Type, Name: *const c_char) -> &'a Value; + pub fn LLVMBuildAddrSpaceCast(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; pub fn LLVMRustBuildIntCast(B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index 69fc8783dc8d2..f933d804fa07c 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -4,6 +4,7 @@ use context::CodegenCx; use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; +use rustc::hir::def::Def; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::TypeFoldable; @@ -22,7 +23,12 @@ impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> { let ty = instance.ty(self.tcx); let llty = self.layout_of(ty).llvm_type(self); - let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { + let addr_space = match self.tcx.describe_def(def_id) { + Some(Def::Static(_, true)) => self.mutable_addr_space(), + _ => self.const_addr_space(), + }; + + let g = self.define_global(symbol_name, llty, 
addr_space).unwrap_or_else(|| { self.sess().span_fatal(self.tcx.def_span(def_id), &format!("symbol `{}` is already defined", symbol_name)) }); diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 958e00506d62a..dae5ff82aa52c 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -24,6 +24,8 @@ use std::ptr; use libc::c_uint; +pub use rustc_target::spec::{AddrSpaceKind, AddrSpaceIdx}; + impl PartialEq for Type { fn eq(&self, other: &Self) -> bool { ptr::eq(self, other) } } @@ -186,10 +188,8 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { - assert_ne!(self.type_kind(ty), TypeKind::Function, - "don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead"); - ty.ptr_to() + fn type_as_ptr_to(&self, ty: &'ll Type, addr_space: AddrSpaceIdx) -> &'ll Type { + ty.ptr_to(addr_space) } fn element_type(&self, ty: &'ll Type) -> &'ll Type { @@ -237,6 +237,14 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn scalar_lltypes(&self) -> &RefCell<FxHashMap<Ty<'tcx>, Self::Type>> { &self.scalar_lltypes } + + fn type_addr_space(&self, ty: &'ll Type) -> Option<AddrSpaceIdx> { + if self.type_kind(ty) == TypeKind::Pointer { + Some(ty.address_space()) + } else { + None + } + } } impl Type { @@ -257,12 +265,42 @@ impl Type { } pub fn i8p_llcx(llcx: &'ll llvm::Context) -> &'ll Type { - Type::i8_llcx(llcx).ptr_to() + Type::i8_llcx(llcx).ptr_to(Default::default()) + } + + pub fn kind(&self) -> TypeKind { + unsafe { + llvm::LLVMRustGetTypeKind(self).to_generic() + } + } + pub fn is_ptr(&self) -> bool { + self.kind() == TypeKind::Pointer } - fn ptr_to(&self) -> &Type { + fn element_type(&self) -> &Type { unsafe { - llvm::LLVMPointerType(&self, 0) + llvm::LLVMGetElementType(self) + } + } + + fn ptr_to(&self, addr_space: AddrSpaceIdx) -> &Type { + unsafe { + llvm::LLVMPointerType(&self, + addr_space.0) + } + } + pub fn address_space(&self) -> AddrSpaceIdx { + AddrSpaceIdx(unsafe { + llvm::LLVMGetPointerAddressSpace(self) + }) + } + pub fn copy_addr_space(&self, addr_space: AddrSpaceIdx) -> &Type { + if !self.is_ptr() { return self; } + + if addr_space != self.address_space() { + self.element_type().ptr_to(addr_space) + } else { + self } } } diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 97128c2d2a2ce..08358df41fe1b 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -237,6 +237,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. + /// + /// Note: the address space used for ptrs is important. Due to the nested + /// nature of these types, we must assume pointers are in the flat space. + /// Spaces are overridden as needed (or will be, in a later patch), when it + /// is known in which space a memory location will reside. + /// fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs @@ -247,10 +253,10 @@ let llty = match self.ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, ..
}) => { - cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) + cx.type_ptr_to_flat(cx.layout_of(ty).llvm_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) + cx.type_ptr_to_flat(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } ty::FnPtr(sig) => { let sig = cx.tcx.normalize_erasing_late_bound_regions( diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index aa82c853257a3..e457f06028dbc 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -279,6 +279,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.load(addr, self.fn_ty.ret.layout.align.abi) } }; + // Make sure pointers are flat: + let llval = bx.flat_addr_cast(llval); bx.ret(llval); } @@ -389,6 +391,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { align, Some("panic_bounds_check_loc") ); + let file_line_col = bx.cx().const_flat_as_cast(file_line_col); (lang_items::PanicBoundsCheckFnLangItem, vec![file_line_col, index, len]) } @@ -405,6 +408,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { align, Some("panic_loc") ); + let msg_file_line_col = bx.cx().const_flat_as_cast(msg_file_line_col); (lang_items::PanicFnLangItem, vec![msg_file_line_col]) } @@ -529,6 +533,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { align, Some("panic_loc"), ); + let msg_file_line_col = bx.cx().const_flat_as_cast(msg_file_line_col); // Obtain the panic entry point. let def_id = diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index b10611e5ac797..bceafeec04ad7 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -45,6 +45,18 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, layout: TyLayout<'tcx>, name: &str + ) -> Self { + debug!("alloca({:?}: {:?})", name, layout); + assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); + let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + Self::new_sized(bx.flat_addr_cast(tmp), layout, layout.align.abi) + } + + /// An alloca, left in the alloca address space. If unsure, use `alloca` above.
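+ /// (On `amdgcn-amd-amdhsa-amdgiz`, for example, this returns the raw + /// `addrspace(5)` pointer, whereas `alloca` above immediately casts + /// the pointer into the flat address space.)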
+ pub fn alloca_addr_space<Bx: BuilderMethods<'a, 'tcx, Value = V>>( + bx: &mut Bx, + layout: TyLayout<'tcx>, + name: &str ) -> Self { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 9ca5414fa717e..97cfb5854674b 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -89,6 +89,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } let zero = bx.cx().const_usize(0); let start = dest.project_index(&mut bx, zero).llval; + let start = bx.flat_addr_cast(start); if let OperandValue::Immediate(v) = cg_elem.val { let size = bx.cx().const_usize(dest.layout.size.bytes()); @@ -110,6 +111,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let count = bx.cx().const_usize(count); let end = dest.project_index(&mut bx, count).llval; + let end = bx.flat_addr_cast(end); let mut header_bx = bx.build_sibling_block("repeat_loop_header"); let mut body_bx = bx.build_sibling_block("repeat_loop_body"); @@ -243,6 +245,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, bx.cx().scalar_pair_element_backend_type(cast, 0, true)); + let lldata = bx.flat_addr_cast(lldata); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -350,11 +353,19 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_)) | - (CastTy::RPtr(_), CastTy::Ptr(_)) => - bx.pointercast(llval, ll_t_out), + (CastTy::RPtr(_), CastTy::Ptr(_)) => { + // This is left in its original address space. This is okay + // because a &mut T -> &T cast wouldn't change the address + // space used to load it. + bx.pointercast(llval, ll_t_out) + } (CastTy::Ptr(_), CastTy::Int(_)) | - (CastTy::FnPtr, CastTy::Int(_)) => - bx.ptrtoint(llval, ll_t_out), + (CastTy::FnPtr, CastTy::Int(_)) => { + // Ensure the ptr is in the flat address space. + // This might not be required, but it is safe. + let llval = bx.flat_addr_cast(llval); + bx.ptrtoint(llval, ll_t_out) + }, (CastTy::Int(_), CastTy::Ptr(_)) => { let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) @@ -607,6 +618,17 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { lhs, rhs ) } else { + // In case we're in separate addr spaces. + // Can happen when comparing against `null_mut`, for example. + // `infer-addr-spaces` should propagate. + let lhs_ty = bx.cx().val_ty(lhs); + let (lhs, rhs) = if bx.cx().type_addr_space(lhs_ty).is_some() { + assert!(bx.cx().type_addr_space(bx.cx().val_ty(rhs)).is_some()); + (bx.flat_addr_cast(lhs), + bx.flat_addr_cast(rhs)) + } else { + (lhs, rhs) + }; bx.icmp( base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs @@ -625,6 +647,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { rhs_extra: Bx::Value, _input_ty: Ty<'tcx>, ) -> Bx::Value { + // In case we're in separate addr spaces. + // Can happen when comparing against `null_mut`, for example. + // `infer-addr-spaces` should propagate.
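+ // E.g. one side may still carry the alloca address space while the + // other is a flat-space constant; casting both to the flat space + // keeps the comparison well-typed, and LLVM's `infer-address-spaces` + // pass strips the casts again where they turn out to be redundant.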
+ let lhs_addr = bx.flat_addr_cast(lhs_addr); + let rhs_addr = bx.flat_addr_cast(rhs_addr); + match op { mir::BinOp::Eq => { let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index bc66087d3ce70..207fc2f660da6 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -9,6 +9,7 @@ use mir::operand::OperandRef; use mir::place::PlaceRef; use rustc::ty::Ty; use rustc::ty::layout::{Align, Size}; +use rustc_target::spec::AddrSpaceIdx; use std::ffi::CStr; use MemFlags; @@ -155,8 +156,20 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; + /// Impls should ignore the address space of `dest_ty`. fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + /// address space casts, then bitcasts to dest_ty without changing address spaces. + fn as_ptr_cast(&mut self, + val: Self::Value, + addr_space: AddrSpaceIdx, + dest_ty: Self::Type) -> Self::Value; + fn addrspace_cast(&mut self, val: Self::Value, + dest: AddrSpaceIdx) -> Self::Value; + fn flat_addr_cast(&mut self, val: Self::Value) -> Self::Value; + fn flat_as_ptr_cast(&mut self, val: Self::Value, + dest_ty: Self::Type) -> Self::Value; + fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs index 482fb67e2b0c2..14be8a3545015 100644 --- a/src/librustc_codegen_ssa/traits/consts.rs +++ b/src/librustc_codegen_ssa/traits/consts.rs @@ -1,11 +1,12 @@ -use super::BackendTypes; +use super::MiscMethods; use mir::place::PlaceRef; use rustc::mir::interpret::Allocation; use rustc::mir::interpret::Scalar; use rustc::ty::layout; use syntax::symbol::LocalInternedString; +use rustc_target::spec::AddrSpaceIdx; -pub trait ConstMethods<'tcx>: BackendTypes { +pub trait ConstMethods<'tcx>: MiscMethods<'tcx> { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; fn const_undef(&self, t: Self::Type) -> Self::Value; @@ -35,6 +36,11 @@ pub trait ConstMethods<'tcx>: BackendTypes { fn const_to_uint(&self, v: Self::Value) -> u64; fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>; + fn const_as_cast(&self, v: Self::Value, space: AddrSpaceIdx) -> Self::Value; + fn const_flat_as_cast(&self, v: Self::Value) -> Self::Value { + self.const_as_cast(v, self.flat_addr_space()) + } + fn is_const_integral(&self, v: Self::Value) -> bool; fn is_const_real(&self, v: Self::Value) -> bool; diff --git a/src/librustc_codegen_ssa/traits/declare.rs b/src/librustc_codegen_ssa/traits/declare.rs index 3cd3c4e48b998..743cf006532d5 100644 --- a/src/librustc_codegen_ssa/traits/declare.rs +++ b/src/librustc_codegen_ssa/traits/declare.rs @@ -3,13 +3,15 @@ use rustc::hir::def_id::DefId; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty; use rustc_mir::monomorphize::Instance; +use rustc_target::spec::AddrSpaceIdx; pub trait DeclareMethods<'tcx>: BackendTypes { /// Declare a global value. /// /// If there’s a value with the same name already declared, the function will /// return its Value instead.
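+ /// + /// The global is declared directly in `addr_space`; callers pick an + /// appropriate space via the `*_addr_space()` helpers on `MiscMethods`.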
- fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value; + fn declare_global(&self, name: &str, ty: Self::Type, + addr_space: AddrSpaceIdx) -> Self::Value; /// Declare a C ABI function. /// @@ -32,12 +34,14 @@ pub trait DeclareMethods<'tcx>: BackendTypes { /// return None if the name already has a definition associated with it. In that /// case an error should be reported to the user, because it usually happens due /// to user’s fault (e.g., misuse of #[no_mangle] or #[export_name] attributes). - fn define_global(&self, name: &str, ty: Self::Type) -> Option<Self::Value>; + fn define_global(&self, name: &str, ty: Self::Type, + addr_space: AddrSpaceIdx) -> Option<Self::Value>; /// Declare a private global /// /// Use this function when you intend to define a global without a name. - fn define_private_global(&self, ty: Self::Type) -> Self::Value; + fn define_private_global(&self, ty: Self::Type, + addr_space: AddrSpaceIdx) -> Self::Value; /// Declare a Rust function with an intention to define it. /// diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs index b23155563665d..bc55a06f7f8e8 100644 --- a/src/librustc_codegen_ssa/traits/misc.rs +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -5,6 +5,7 @@ use rustc::session::Session; use rustc::ty::{self, Instance, Ty}; use rustc::util::nodemap::FxHashMap; use rustc_mir::monomorphize::partitioning::CodegenUnit; +use rustc_target::spec::AddrSpaceIdx; use std::cell::RefCell; use std::sync::Arc; @@ -26,4 +27,10 @@ pub trait MiscMethods<'tcx>: BackendTypes { fn set_frame_pointer_elimination(&self, llfn: Self::Value); fn apply_target_cpu_attr(&self, llfn: Self::Value); fn create_used_variable(&self); + + fn can_cast_addr_space(&self, _from: AddrSpaceIdx, _to: AddrSpaceIdx) -> bool { true } + fn alloca_addr_space(&self) -> AddrSpaceIdx { Default::default() } + fn const_addr_space(&self) -> AddrSpaceIdx { Default::default() } + fn mutable_addr_space(&self) -> AddrSpaceIdx { Default::default() } + fn flat_addr_space(&self) -> AddrSpaceIdx { Default::default() } } diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs index 8fe8b7ecd4709..538bd73df53fc 100644 --- a/src/librustc_codegen_ssa/traits/mod.rs +++ b/src/librustc_codegen_ssa/traits/mod.rs @@ -59,6 +59,20 @@ pub trait CodegenMethods<'tcx>: + AsmMethods<'tcx> + PreDefineMethods<'tcx> { + /// Check that we can actually cast between these addr spaces.
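+ /// E.g. a target may declare that its `readonly` space is not shared + /// with `alloca`; `can_cast_addr_space` consults the target's + /// `shared_with` sets (see `AddrSpaceProps`), and this helper turns a + /// disallowed cast into a `bug!`.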
+ fn check_addr_space_cast(&self, val: Self::Value, dest: Self::Type) { + let src_ty = self.val_ty(val); + + match (self.type_addr_space(src_ty), self.type_addr_space(dest)) { + (Some(left), Some(right)) if !self.can_cast_addr_space(left, right) => { + bug!("Target incompatible address space cast:\n\ + source addr space `{}`, dest addr space `{}`\n\ + source value: {:?}, dest ty: {:?}", + left, right, val, dest); + }, + _ => { }, + } + } } impl<'tcx, T> CodegenMethods<'tcx> for T where diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs index 2ec0c8e5a75cc..6d70f5b021a4c 100644 --- a/src/librustc_codegen_ssa/traits/type_.rs +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -7,6 +7,7 @@ use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc::ty::{self, Ty}; use rustc::util::nodemap::FxHashMap; use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; +use rustc_target::spec::AddrSpaceIdx; use std::cell::RefCell; use syntax::ast; @@ -36,7 +37,13 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_kind(&self, ty: Self::Type) -> TypeKind; - fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; + + /// Return a pointer to `ty` in the default address space. + fn type_ptr_to(&self, ty: Self::Type) -> Self::Type { + self.type_as_ptr_to(ty, Default::default()) + } + fn type_as_ptr_to(&self, ty: Self::Type, addr_space: AddrSpaceIdx) -> Self::Type; + fn element_type(&self, ty: Self::Type) -> Self::Type; /// Return the number of elements in `self` if it is a LLVM vector type. @@ -49,7 +56,21 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { fn int_width(&self, ty: Self::Type) -> u64; fn val_ty(&self, v: Self::Value) -> Self::Type; + fn val_addr_space(&self, v: Self::Value) -> Option<AddrSpaceIdx> { + self.type_addr_space(self.val_ty(v)) + } fn scalar_lltypes(&self) -> &RefCell<FxHashMap<Ty<'tcx>, Self::Type>>; + + fn type_addr_space(&self, ty: Self::Type) -> Option<AddrSpaceIdx>; + fn type_copy_addr_space(&self, ty: Self::Type, addr_space: Option<AddrSpaceIdx>) -> Self::Type { + match (addr_space, self.type_kind(ty)) { + (Some(addr_space), TypeKind::Pointer) => { + let elem = self.element_type(ty); + self.type_as_ptr_to(elem, addr_space) + }, + _ => ty, + } + } } pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { @@ -60,6 +81,21 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { fn type_i8p(&self) -> Self::Type { self.type_ptr_to(self.type_i8()) } + fn type_i8p_as(&self, addr_space: AddrSpaceIdx) -> Self::Type { + self.type_as_ptr_to(self.type_i8(), addr_space) + } + fn type_alloca_i8p(&self) -> Self::Type { + self.type_i8p_as(self.alloca_addr_space()) + } + fn type_const_i8p(&self) -> Self::Type { + self.type_i8p_as(self.const_addr_space()) + } + fn type_mut_i8p(&self) -> Self::Type { + self.type_i8p_as(self.mutable_addr_space()) + } + fn type_flat_i8p(&self) -> Self::Type { + self.type_i8p_as(self.flat_addr_space()) + } fn type_int(&self) -> Self::Type { match &self.sess().target.target.target_c_int_width[..] { @@ -151,6 +187,45 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { _ => bug!("unexpected unsized tail: {:?}", tail.sty), } } + /// Enforce that no address space change happens in a cast. + /// Pointers in different address spaces can have different + /// machine-level sizes (i.e. on AMDGPU, allocas are 32 bits, + /// not 64 bits!). We enforce that the flat address space is the + /// largest (and similarly for alignment), so that address space is safe + /// to cast to ints, etc. Also, address space changes require computing + /// an offset or two, so a straight bitcast is wrong. + fn type_check_no_addr_space_change(&self, what: &str, + src: Self::Value, + dest_ty: Self::Type) { + let src_ty = self.val_ty(src); + match (self.type_addr_space(src_ty), self.type_addr_space(dest_ty)) { + (Some(src_as), Some(dest_as)) if src_as != dest_as => { + bug!("Invalid address space cast in `{}` cast:\n\ + source addr space `{}`, dest addr space `{}`\n\ + source value: {:?}, dest ty: {:?}", what, + src_as, dest_as, src, dest_ty); + }, + (Some(src_as), None) if src_as != self.flat_addr_space() => { + bug!("Invalid address space cast in `{}` cast:\n\ + source addr space `{}` is not flat\n\ + source value: {:?}", + what, src_as, src); + }, + _ => { }, + } + } + fn type_ptr_to_alloca(&self, ty: Self::Type) -> Self::Type { + self.type_as_ptr_to(ty, self.alloca_addr_space()) + } + fn type_ptr_to_const(&self, ty: Self::Type) -> Self::Type { + self.type_as_ptr_to(ty, self.const_addr_space()) + } + fn type_ptr_to_mut(&self, ty: Self::Type) -> Self::Type { + self.type_as_ptr_to(ty, self.mutable_addr_space()) + } + fn type_ptr_to_flat(&self, ty: Self::Type) -> Self::Type { + self.type_as_ptr_to(ty, self.flat_addr_space()) + } } impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {} diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs index 3f95e666535be..63c1244672ad3 100644 --- a/src/librustc_target/abi/mod.rs +++ b/src/librustc_target/abi/mod.rs @@ -1,7 +1,7 @@ pub use self::Integer::*; pub use self::Primitive::*; -use spec::Target; +use spec::{Target, AddrSpaceIdx, }; use std::fmt; use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive}; @@ -22,6 +22,7 @@ pub struct TargetDataLayout { pub i128_align: AbiAndPrefAlign, pub f32_align: AbiAndPrefAlign, pub f64_align: AbiAndPrefAlign, + pub pointers: Vec<Option<(Size, AbiAndPrefAlign)>>, pub pointer_size: Size, pub pointer_align: AbiAndPrefAlign, pub aggregate_align: AbiAndPrefAlign, @@ -29,6 +30,8 @@ pub struct TargetDataLayout { /// Alignments for vector types. pub vector_align: Vec<(Size, AbiAndPrefAlign)>, + pub alloca_address_space: AddrSpaceIdx, + pub instruction_address_space: u32, } @@ -46,9 +49,11 @@ impl Default for TargetDataLayout { i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) }, f32_align: AbiAndPrefAlign::new(align(32)), f64_align: AbiAndPrefAlign::new(align(64)), + pointers: vec![], pointer_size: Size::from_bits(64), pointer_align: AbiAndPrefAlign::new(align(64)), aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) }, + alloca_address_space: Default::default(), vector_align: vec![ (Size::from_bits(64), AbiAndPrefAlign::new(align(64))), (Size::from_bits(128), AbiAndPrefAlign::new(align(128))), @@ -60,14 +65,6 @@ impl TargetDataLayout { pub fn parse(target: &Target) -> Result<TargetDataLayout, String> { - // Parse an address space index from a string. - let parse_address_space = |s: &str, cause: &str| { - s.parse::<u32>().map_err(|err| { - format!("invalid address space `{}` for `{}` in \"data-layout\": {}", - s, cause, err) - }) - }; - // Parse a bit count from a string.
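+ // (A bit count is the `<size>`/`<align>` field of entries like + // `i64:64` or `p5:32:32` in the LLVM data-layout string.)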
let parse_bits = |s: &str, kind: &str, cause: &str| { s.parse::<u64>().map_err(|err| { @@ -100,23 +97,38 @@ impl TargetDataLayout { }) }; + fn resize_and_set<T>(vec: &mut Vec<T>, idx: usize, v: T) + where T: Default, + { + while idx >= vec.len() { + vec.push(T::default()); + } + + vec[idx] = v; + } + let mut dl = TargetDataLayout::default(); let mut i128_align_src = 64; for spec in target.data_layout.split('-') { match spec.split(':').collect::<Vec<_>>()[..] { ["e"] => dl.endian = Endian::Little, ["E"] => dl.endian = Endian::Big, - [p] if p.starts_with("P") => { - dl.instruction_address_space = parse_address_space(&p[1..], "P")? - } ["a", ref a..] => dl.aggregate_align = align(a, "a")?, ["f32", ref a..] => dl.f32_align = align(a, "f32")?, ["f64", ref a..] => dl.f64_align = align(a, "f64")?, [p @ "p", s, ref a..] | [p @ "p0", s, ref a..] => { dl.pointer_size = size(s, p)?; dl.pointer_align = align(a, p)?; - } - [s, ref a..] if s.starts_with("i") => { + resize_and_set(&mut dl.pointers, 0, Some((dl.pointer_size, + dl.pointer_align))); + }, + [p, s, ref a..] if p.starts_with('p') => { + let idx = parse_bits(&p[1..], "u32", "address space index")? as usize; + let size = size(s, p)?; + let align = align(a, p)?; + resize_and_set(&mut dl.pointers, idx, Some((size, align))); + }, + [s, ref a..] if s.starts_with("i") => { let bits = match s[1..].parse::<u64>() { Ok(bits) => bits, Err(_) => { @@ -149,7 +161,13 @@ impl TargetDataLayout { } // No existing entry, add a new one. dl.vector_align.push((v_size, a)); - } + }, + [s, ..] if s.starts_with("A") => { + // default alloca address space + let idx = parse_bits(&s[1..], "u32", + "default alloca address space")? as u32; + dl.alloca_address_space = AddrSpaceIdx(idx); + }, _ => {} // Ignore everything else. } } @@ -171,9 +189,37 @@ impl TargetDataLayout { dl.pointer_size.bits(), target.target_pointer_width)); } + // We don't specialize pointer sizes for specific address spaces, + // so enforce that the default address space can hold all the bits + // of any other spaces. Similar for alignment. + { + let ptrs_iter = dl.pointers.iter().enumerate() + .filter_map(|(idx, ptrs)| { + ptrs.map(|(s, a)| (idx, s, a) ) + }); + for (idx, size, align) in ptrs_iter { + if size > dl.pointer_size { + return Err(format!("Address space {} pointer is bigger than the default \ + pointer: {} vs {}", + idx, size.bits(), dl.pointer_size.bits())); + } + if align.abi > dl.pointer_align.abi { + return Err(format!("Address space {} pointer alignment is bigger than the \ + default pointer: {} vs {}", + idx, align.abi.bits(), dl.pointer_align.abi.bits())); + } + } + } + Ok(dl) } + pub fn pointer_info(&self, addr_space: AddrSpaceIdx) -> (Size, AbiAndPrefAlign) { + self.pointers.get(addr_space.0 as usize) + .and_then(|&v| v ) + .unwrap_or((self.pointer_size, self.pointer_align)) + } + /// Return exclusive upper bound on object size. /// /// The theoretical maximum object size is defined as the maximum positive `isize` value. @@ -940,3 +986,82 @@ impl<'a, Ty> TyLayout<'a, Ty> { } } } + +#[cfg(test)] +mod tests { + use super::*; + use spec::{Target, TargetTriple, }; + + #[test] + fn pointer_size_align() { + // amdgcn-amd-amdhsa-amdgiz + const DL: &'static str = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-\ + p4:32:32-p5:32:32-i64:64-v16:16-v24:32-\ + v32:32-v48:64-v96:128-v192:256-v256:256-\ + v512:512-v1024:1024-v2048:2048-n32:64-A5"; + + // Doesn't need to be real...
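+ // The layout string above reads: flat/default pointers are 64-bit + // (`p:64:64`), address spaces 3 through 5 use 32-bit pointers + // (`p3:32:32`..`p5:32:32`), and `A5` marks addrspace(5) as the + // alloca address space.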
+ let triple = TargetTriple::TargetTriple("x86_64-unknown-linux-gnu".into()); + let mut target = Target::search(&triple).unwrap(); + target.data_layout = DL.into(); + + let dl = TargetDataLayout::parse(&target); + assert!(dl.is_ok()); + let dl = dl.unwrap(); + + let default = (dl.pointer_size, dl.pointer_align); + + let thirty_two_size = Size::from_bits(32); + let thirty_two_align = AbiAndPrefAlign::new(Align::from_bits(32).unwrap()); + let thirty_two = (thirty_two_size, thirty_two_align); + let sixty_four_size = Size::from_bits(64); + let sixty_four_align = AbiAndPrefAlign::new(Align::from_bits(64).unwrap()); + let sixty_four = (sixty_four_size, sixty_four_align); + + assert_eq!(dl.pointer_info(AddrSpaceIdx(0)), default); + assert_eq!(dl.pointer_info(AddrSpaceIdx(0)), sixty_four); + assert_eq!(dl.pointer_info(AddrSpaceIdx(1)), sixty_four); + assert_eq!(dl.pointer_info(AddrSpaceIdx(2)), sixty_four); + assert_eq!(dl.pointer_info(AddrSpaceIdx(3)), thirty_two); + assert_eq!(dl.pointer_info(AddrSpaceIdx(4)), thirty_two); + assert_eq!(dl.pointer_info(AddrSpaceIdx(5)), thirty_two); + + // unknown address spaces need to be the same as the default: + assert_eq!(dl.pointer_info(AddrSpaceIdx(7)), default); + } + + #[test] + fn default_is_biggest() { + // Note p1 is 128 bits. + const DL: &'static str = "e-p:64:64-p1:128:128-p2:64:64-p3:32:32-\ + p4:32:32-p5:32:32-i64:64-v16:16-v24:32-\ + v32:32-v48:64-v96:128-v192:256-v256:256-\ + v512:512-v1024:1024-v2048:2048-n32:64-A5"; + + // Doesn't need to be real... + let triple = TargetTriple::TargetTriple("x86_64-unknown-linux-gnu".into()); + let mut target = Target::search(&triple).unwrap(); + target.data_layout = DL.into(); + + assert!(TargetDataLayout::parse(&target).is_err()); + } + #[test] + fn alloca_addr_space() { + // amdgcn-amd-amdhsa-amdgiz + const DL: &'static str = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-\ + p4:32:32-p5:32:32-i64:64-v16:16-v24:32-\ + v32:32-v48:64-v96:128-v192:256-v256:256-\ + v512:512-v1024:1024-v2048:2048-n32:64-A5"; + + // Doesn't need to be real... + let triple = TargetTriple::TargetTriple("x86_64-unknown-linux-gnu".into()); + let mut target = Target::search(&triple).unwrap(); + target.data_layout = DL.into(); + + let dl = TargetDataLayout::parse(&target); + assert!(dl.is_ok()); + let dl = dl.unwrap(); + + assert_eq!(dl.alloca_address_space, AddrSpaceIdx(5)); + } +} diff --git a/src/librustc_target/spec/mod.rs b/src/librustc_target/spec/mod.rs index 3a21ca19b176b..8ddebd05ca871 100644 --- a/src/librustc_target/spec/mod.rs +++ b/src/librustc_target/spec/mod.rs @@ -35,11 +35,12 @@ //! to the list specified by the target, rather than replace. 
use serialize::json::{Json, ToJson}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::default::Default; use std::{fmt, io}; use std::path::{Path, PathBuf}; use std::str::FromStr; +use std::ops::{Deref, DerefMut, }; use spec::abi::{Abi, lookup as lookup_abi}; pub mod abi; @@ -260,6 +261,158 @@ impl ToJson for MergeFunctions { pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>; pub type TargetResult = Result<Target, String>; +#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct AddrSpaceIdx(pub u32); +impl Default for AddrSpaceIdx { + fn default() -> Self { + AddrSpaceIdx(0) + } +} +impl fmt::Display for AddrSpaceIdx { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} +impl FromStr for AddrSpaceIdx { + type Err = <u32 as FromStr>::Err; + fn from_str(s: &str) -> Result<Self, Self::Err> { + Ok(AddrSpaceIdx(u32::from_str(s)?)) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum AddrSpaceKind { + Flat, + Alloca, + /// aka constant + ReadOnly, + /// aka global + ReadWrite, + Named(String), +} + +impl FromStr for AddrSpaceKind { + type Err = String; + fn from_str(s: &str) -> Result<Self, Self::Err> { + Ok(match s { + "flat" => AddrSpaceKind::Flat, + "alloca" => AddrSpaceKind::Alloca, + "readonly" => AddrSpaceKind::ReadOnly, + "readwrite" => AddrSpaceKind::ReadWrite, + named => AddrSpaceKind::Named(named.into()), + }) + } +} +impl fmt::Display for AddrSpaceKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", match self { + &AddrSpaceKind::Flat => "flat", + &AddrSpaceKind::Alloca => "alloca", + &AddrSpaceKind::ReadOnly => "readonly", + &AddrSpaceKind::ReadWrite => "readwrite", + &AddrSpaceKind::Named(ref s) => s, + }) + } +} +impl ToJson for AddrSpaceKind { + fn to_json(&self) -> Json { + Json::String(format!("{}", self)) + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct AddrSpaceProps { + pub index: AddrSpaceIdx, + /// Indicates which addr spaces this addr space can be addrspacecast-ed to.
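+ /// (`CodegenCx::can_cast_addr_space` consults this set: a direct
+ /// `addrspacecast` between two spaces is only considered legal when
+ /// the destination kind is listed here.)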
+ pub shared_with: BTreeSet<AddrSpaceKind>, +} + +impl AddrSpaceProps { + pub fn from_json(json: &Json) -> Result<Self, String> { + let index = json.find("index").and_then(|v| v.as_u64() ) + .ok_or_else(|| { + "invalid address space index, expected an unsigned integer" + })?; + + let mut shared_with = vec![]; + if let Some(shared) = json.find("shared-with").and_then(|v| v.as_array() ) { + for s in shared { + let s = s.as_string() + .ok_or_else(|| { + "expected string for address space kind" + })?; + + let kind = AddrSpaceKind::from_str(s)?; + shared_with.push(kind); + } + } + + Ok(AddrSpaceProps { + index: AddrSpaceIdx(index as u32), + shared_with: shared_with.into_iter().collect(), + }) + } +} +impl ToJson for AddrSpaceProps { + fn to_json(&self) -> Json { + let mut obj = BTreeMap::new(); + obj.insert("index".to_string(), self.index.0.to_json()); + let mut shared_with = vec![]; + for sw in self.shared_with.iter() { + shared_with.push(sw.to_json()); + } + obj.insert("shared-with".to_string(), Json::Array(shared_with)); + + Json::Object(obj) + } +} +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct AddrSpaces(pub BTreeMap<AddrSpaceKind, AddrSpaceProps>); +impl Deref for AddrSpaces { + type Target = BTreeMap<AddrSpaceKind, AddrSpaceProps>; + fn deref(&self) -> &Self::Target { &self.0 } +} +impl DerefMut for AddrSpaces { + fn deref_mut(&mut self) -> &mut BTreeMap<AddrSpaceKind, AddrSpaceProps> { &mut self.0 } +} +impl ToJson for AddrSpaces { + fn to_json(&self) -> Json { + let obj = self.iter() + .map(|(k, v)| { + (format!("{}", k), v.to_json()) + }) + .collect(); + Json::Object(obj) + } +} +impl Default for AddrSpaces { + fn default() -> Self { + let mut asp = BTreeMap::new(); + + let kinds = vec![AddrSpaceKind::ReadOnly, + AddrSpaceKind::ReadWrite, + AddrSpaceKind::Alloca, + AddrSpaceKind::Flat, ]; + + let insert = |asp: &mut BTreeMap<_, _>, kind, idx| { + let props = AddrSpaceProps { + index: idx, + shared_with: kinds.clone() + .into_iter() + .filter(|k| *k != kind) + .collect(), + }; + assert!(asp.insert(kind, props).is_none()); + }; + + for kind in kinds.iter() { + insert(&mut asp, kind.clone(), Default::default()); + } + + AddrSpaces(asp) + } +} + macro_rules! supported_targets { ( $(($triple:expr, $module:ident),)+ ) => ( $(mod $module;)* @@ -732,6 +885,11 @@ pub struct TargetOptions { /// the usual logic to figure this out from the crate itself. pub override_export_symbols: Option<Vec<String>>, + /// Description of all address spaces and how they are shared with one another. + /// Defaults to a single, flat address space. Note that address space `0` is + /// generally assumed to be the flat address space. + pub addr_spaces: AddrSpaces, + + /// Determines how or whether the MergeFunctions LLVM pass should run for + /// this target. Either "disabled", "trampolines", or "aliases".
/// The MergeFunctions pass is generally useful, but some targets may need @@ -821,6 +979,7 @@ impl Default for TargetOptions { requires_uwtable: false, simd_types_indirect: true, override_export_symbols: None, + addr_spaces: Default::default(), merge_functions: MergeFunctions::Aliases, } } @@ -1051,6 +1210,16 @@ impl Target { } } } ); + ($key_name:ident, addr_spaces) => ( { + let name = (stringify!($key_name)).replace("_", "-"); + if let Some(obj) = obj.find(&name[..]).and_then(|o| o.as_object() ) { + for (k, v) in obj { + let k = AddrSpaceKind::from_str(&k).unwrap(); + let props = AddrSpaceProps::from_json(v)?; + base.options.$key_name.insert(k, props); + } + } + } ); } key!(is_builtin, bool); @@ -1126,6 +1295,7 @@ impl Target { key!(requires_uwtable, bool); key!(simd_types_indirect, bool); key!(override_export_symbols, opt_list); + key!(addr_spaces, addr_spaces); key!(merge_functions, MergeFunctions)?; if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) { @@ -1338,6 +1508,7 @@ impl ToJson for Target { target_option_val!(requires_uwtable); target_option_val!(simd_types_indirect); target_option_val!(override_export_symbols); + target_option_val!(addr_spaces); target_option_val!(merge_functions); if default.abi_blacklist != self.options.abi_blacklist { diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index 9d3e6f93b0c11..f2107743288f4 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -121,17 +121,35 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M, } extern "C" LLVMValueRef -LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, LLVMTypeRef Ty) { - return wrap(unwrap(M)->getOrInsertGlobal(Name, unwrap(Ty))); +LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, LLVMTypeRef Ty, unsigned AS) { + GlobalVariable* GV = nullptr; + Module* MM = unwrap(M); + Type* ETy = unwrap(Ty); + if (!(GV = MM->getNamedGlobal(Name))) { + GV = new GlobalVariable(ETy, false, GlobalVariable::ExternalLinkage, + nullptr, Name, GlobalVariable::NotThreadLocal, AS); + MM->getGlobalList().push_back(GV); + } + Type *GVTy = GV->getType(); + PointerType *PTy = PointerType::get(ETy, GVTy->getPointerAddressSpace()); + if (GVTy != PTy) { + return wrap(ConstantExpr::getBitCast(GV, PTy)); + } else { + return wrap(GV); + } } extern "C" LLVMValueRef -LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty) { +LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty, unsigned AS) { return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false, GlobalValue::PrivateLinkage, - nullptr)); + nullptr, + "", + nullptr, + GlobalVariable::NotThreadLocal, + AS)); } extern "C" LLVMTypeRef LLVMRustMetadataTypeInContext(LLVMContextRef C) { From e041b4a496c06c6cd3b15693be119bad4f0b1177 Mon Sep 17 00:00:00 2001 From: Richard Diamond Date: Thu, 18 Oct 2018 06:07:36 -0500 Subject: [PATCH 2/3] Support Harvard architectures. 
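
On a Harvard architecture, code and data live in separate memories, so
function pointers are not plain data pointers: they carry their own
address space. LLVM models this with the `P<n>` data-layout directive,
which this patch parses into
`TargetDataLayout::instruction_address_space`; an `instruction` address
space kind is also added to the target spec, and codegen now builds
function pointer types in that space.

A rough sketch of how a Harvard-style target could describe this (the
index `1` below is made up for illustration, not taken from any real
target):

    let mut spaces = AddrSpaces::default();
    // Code pointers share nothing with the data spaces, so casts
    // from function pointers to data pointers are rejected.
    spaces.insert(AddrSpaceKind::Instruction, AddrSpaceProps {
        index: AddrSpaceIdx(1),
        shared_with: BTreeSet::new(),
    });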
--- src/librustc_codegen_llvm/abi.rs | 10 +--------- src/librustc_codegen_llvm/context.rs | 10 ++++++++++ src/librustc_codegen_llvm/declare.rs | 2 ++ src/librustc_codegen_llvm/type_.rs | 3 ++- src/librustc_codegen_ssa/meth.rs | 2 +- src/librustc_codegen_ssa/traits/misc.rs | 1 + src/librustc_codegen_ssa/traits/type_.rs | 6 ++++++ src/librustc_target/abi/mod.rs | 10 +++++++--- src/librustc_target/spec/mod.rs | 15 +++++++++++---- 9 files changed, 41 insertions(+), 18 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 5ca295733b2e5..2a603433efb9f 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -11,7 +11,7 @@ use rustc_target::abi::call::ArgType; use rustc_codegen_ssa::traits::*; -use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi}; +use rustc_target::abi::{LayoutOf, Size, TyLayout, Abi as LayoutAbi}; use rustc::ty::{self, Ty, Instance}; use rustc::ty::layout; @@ -311,7 +311,6 @@ pub trait FnTypeExt<'tcx> { cx: &CodegenCx<'ll, 'tcx>, abi: Abi); fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value); @@ -695,13 +694,6 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { - unsafe { - llvm::LLVMPointerType(self.llvm_type(cx), - cx.data_layout().instruction_address_space as c_uint) - } - } - fn llvm_cconv(&self) -> llvm::CallConv { match self.conv { Conv::C => llvm::CCallConv, diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index de06a6f5c038a..ae9e1c14bf2d5 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -89,6 +89,7 @@ pub struct CodegenCx<'ll, 'tcx: 'll> { const_addr_space: AddrSpaceIdx, mutable_addr_space: AddrSpaceIdx, flat_addr_space: AddrSpaceIdx, + instruction_addr_space: AddrSpaceIdx, pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>, @@ -299,6 +300,11 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { .get(&AddrSpaceKind::Flat) .map(|v| v.index ) .unwrap_or_default(); + let instruction_addr_space = + tcx.sess.target.target.options.addr_spaces + .get(&AddrSpaceKind::Instruction) + .map(|v| v.index ) + .unwrap_or_default(); CodegenCx { tcx, @@ -325,6 +331,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { const_addr_space, mutable_addr_space, flat_addr_space, + instruction_addr_space, dbg_cx, eh_personality: Cell::new(None), @@ -497,6 +504,9 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { from_props.shared_with.contains(&to_kind) } + fn inst_addr_space(&self) -> AddrSpaceIdx { + self.instruction_addr_space + } fn alloca_addr_space(&self) -> AddrSpaceIdx { + self.alloca_addr_space + } diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 8489e7a98135b..c096f6dbc24ee 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -24,6 +24,7 @@ use context::CodegenCx; use type_::Type; use rustc_codegen_ssa::traits::*; use value::Value; +use common::val_ty; /// Declare a function.
/// @@ -40,6 +41,7 @@ fn declare_raw_fn( let llfn = unsafe { llvm::LLVMRustGetOrInsertFunction(cx.llmod, namebuf.as_ptr(), ty) }; + assert_eq!(val_ty(llfn).address_space(), cx.inst_addr_space()); llvm::SetFunctionCallConv(llfn, callconv); // Function addresses in Rust are never significant, allowing functions to diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index dae5ff82aa52c..7db383203e2e0 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -337,7 +337,8 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { ty.llvm_type(self) } fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { - ty.ptr_to_llvm_type(self) + self.type_as_ptr_to(ty.llvm_type(self), self.inst_addr_space()) + } fn reg_backend_type(&self, ty: &Reg) -> &'ll Type { ty.llvm_type(self) diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index 98ad2616eeaae..79c7bc5fbbfc6 100644 --- a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -81,7 +81,7 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( } // Not in the cache. Build it. - let nullptr = cx.const_null(cx.type_i8p()); + let nullptr = cx.const_null(cx.type_inst_i8p()); let methods_root; let methods = if let Some(trait_ref) = trait_ref { diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs index bc55a06f7f8e8..268d2c7ed88a0 100644 --- a/src/librustc_codegen_ssa/traits/misc.rs +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -29,6 +29,7 @@ pub trait MiscMethods<'tcx>: BackendTypes { fn create_used_variable(&self); fn can_cast_addr_space(&self, _from: AddrSpaceIdx, _to: AddrSpaceIdx) -> bool { true } + fn inst_addr_space(&self) -> AddrSpaceIdx { Default::default() } fn alloca_addr_space(&self) -> AddrSpaceIdx { Default::default() } fn const_addr_space(&self) -> AddrSpaceIdx { Default::default() } fn mutable_addr_space(&self) -> AddrSpaceIdx { Default::default() } diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs index 6d70f5b021a4c..18ea155e57834 100644 --- a/src/librustc_codegen_ssa/traits/type_.rs +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -84,6 +84,9 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { fn type_i8p_as(&self, addr_space: AddrSpaceIdx) -> Self::Type { self.type_as_ptr_to(self.type_i8(), addr_space) } + fn type_inst_i8p(&self) -> Self::Type { + self.type_i8p_as(self.inst_addr_space()) + } fn type_alloca_i8p(&self) -> Self::Type { self.type_i8p_as(self.alloca_addr_space()) } @@ -214,6 +217,9 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { _ => { }, } } + fn type_ptr_to_inst(&self, ty: Self::Type) -> Self::Type { + self.type_as_ptr_to(ty, self.inst_addr_space()) + } fn type_ptr_to_alloca(&self, ty: Self::Type) -> Self::Type { self.type_as_ptr_to(ty, self.alloca_addr_space()) } diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs index 63c1244672ad3..ae16f6c6bbed9 100644 --- a/src/librustc_target/abi/mod.rs +++ b/src/librustc_target/abi/mod.rs @@ -31,8 +31,7 @@ pub struct TargetDataLayout { pub vector_align: Vec<(Size, AbiAndPrefAlign)>, pub alloca_address_space: AddrSpaceIdx, - - pub instruction_address_space: u32, + pub instruction_address_space: AddrSpaceIdx, } impl Default for TargetDataLayout { @@ -54,11 +53,11 @@ impl Default for TargetDataLayout { pointer_align: AbiAndPrefAlign::new(align(64)), aggregate_align: 
AbiAndPrefAlign { abi: align(0), pref: align(64) }, alloca_address_space: Default::default(), + instruction_address_space: Default::default(), vector_align: vec![ (Size::from_bits(64), AbiAndPrefAlign::new(align(64))), (Size::from_bits(128), AbiAndPrefAlign::new(align(128))), ], - instruction_address_space: 0, } } } @@ -128,6 +127,11 @@ impl TargetDataLayout { let align = align(a, p)?; resize_and_set(&mut dl.pointers, idx, Some((size, align))); }, + [ref p] if p.starts_with("P") => { + let idx = parse_bits(&p[1..], "u32", + "instruction address space")? as u32; + dl.instruction_address_space = AddrSpaceIdx(idx); + } [s, ref a..] if s.starts_with("i") => { let bits = match s[1..].parse::<u64>() { Ok(bits) => bits, Err(_) => { diff --git a/src/librustc_target/spec/mod.rs b/src/librustc_target/spec/mod.rs index 8ddebd05ca871..f3fdf0028ded7 100644 --- a/src/librustc_target/spec/mod.rs +++ b/src/librustc_target/spec/mod.rs @@ -288,6 +288,8 @@ pub enum AddrSpaceKind { ReadOnly, /// aka global ReadWrite, + /// For Harvard architectures, the address space of program instructions + Instruction, Named(String), } @@ -299,6 +301,7 @@ impl FromStr for AddrSpaceKind { "alloca" => AddrSpaceKind::Alloca, "readonly" => AddrSpaceKind::ReadOnly, "readwrite" => AddrSpaceKind::ReadWrite, + "instruction" => AddrSpaceKind::Instruction, named => AddrSpaceKind::Named(named.into()), }) } @@ -310,6 +313,7 @@ impl fmt::Display for AddrSpaceKind { &AddrSpaceKind::Alloca => "alloca", &AddrSpaceKind::ReadOnly => "readonly", &AddrSpaceKind::ReadWrite => "readwrite", + &AddrSpaceKind::Instruction => "instruction", &AddrSpaceKind::Named(ref s) => s, }) } @@ -389,10 +393,13 @@ impl Default for AddrSpaces { fn default() -> Self { let mut asp = BTreeMap::new(); - let kinds = vec![AddrSpaceKind::ReadOnly, - AddrSpaceKind::ReadWrite, - AddrSpaceKind::Alloca, - AddrSpaceKind::Flat, ]; + let kinds = vec![ + AddrSpaceKind::ReadOnly, + AddrSpaceKind::ReadWrite, + AddrSpaceKind::Alloca, + AddrSpaceKind::Flat, + AddrSpaceKind::Instruction, + ]; let insert = |asp: &mut BTreeMap<_, _>, kind, idx| { let props = AddrSpaceProps { From 1019e2017cb96dfe312983f06382fc909a909626 Mon Sep 17 00:00:00 2001 From: Richard Diamond Date: Tue, 23 Oct 2018 05:57:40 -0500 Subject: [PATCH 3/3] LLVM value address space optimizations: leave values in their original address space where possible. --- src/librustc_codegen_llvm/abi.rs | 4 +- src/librustc_codegen_ssa/mir/block.rs | 19 ++++----- src/librustc_codegen_ssa/mir/mod.rs | 9 +++-- src/librustc_codegen_ssa/mir/place.rs | 2 +- src/librustc_codegen_ssa/mir/rvalue.rs | 55 ++++++++++++++++---------- 5 files changed, 53 insertions(+), 36 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 2a603433efb9f..17c67450af728 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -652,7 +652,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..)
=> { - llargument_tys.push(cx.type_ptr_to_flat(self.ret.memory_ty(cx))); + llargument_tys.push(cx.type_ptr_to_alloca(self.ret.memory_ty(cx))); cx.type_void() } }; @@ -682,7 +682,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => cx.type_ptr_to_flat(arg.memory_ty(cx)), + PassMode::Indirect(_, None) => cx.type_ptr_to_alloca(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index e457f06028dbc..d3ce3f2778333 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -263,7 +263,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llslot = match op.val { Immediate(_) | Pair(..) => { let scratch = - PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + PlaceRef::alloca_addr_space(&mut bx, self.fn_ty.ret.layout, + "ret"); op.val.store(&mut bx, scratch); scratch.llval } @@ -791,7 +792,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Immediate(_) | Pair(..) => { match arg.mode { PassMode::Indirect(..) | PassMode::Cast(_) => { - let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + let scratch = PlaceRef::alloca_addr_space(bx, arg.layout, "arg"); op.val.store(bx, scratch); (scratch.llval, scratch.align, true) } @@ -806,12 +807,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. - let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + let scratch = PlaceRef::alloca_addr_space(bx, arg.layout, "arg"); base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align, op.layout, MemFlags::empty()); (scratch.llval, scratch.align, true) } else { - (llval, align, true) + (bx.flat_addr_cast(llval), align, true) } } }; @@ -883,7 +884,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32 ])); - let slot = PlaceRef::alloca(bx, layout, "personalityslot"); + let slot = PlaceRef::alloca_addr_space(bx, layout, "personalityslot"); self.personality_slot = Some(slot); slot } @@ -979,7 +980,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return if fn_ret.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. - let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + let tmp = PlaceRef::alloca_addr_space(bx, fn_ret.layout, "tmp_ret"); tmp.storage_live(bx); llargs.push(tmp.llval); ReturnDest::IndirectOperand(tmp, index) @@ -987,7 +988,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Currently, intrinsics always need a location to store // the result. 
so we create a temporary alloca for the // result - let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + let tmp = PlaceRef::alloca_addr_space(bx, fn_ret.layout, "tmp_ret"); tmp.storage_live(bx); ReturnDest::IndirectOperand(tmp, index) } else { @@ -1031,7 +1032,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { LocalRef::Operand(None) => { let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst)); assert!(!dst_layout.ty.has_erasable_regions()); - let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); + let place = PlaceRef::alloca_addr_space(bx, dst_layout, "transmute_temp"); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); let op = bx.load_operand(place); @@ -1084,7 +1085,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if let PassMode::Cast(_) = ret_ty.mode { - let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); + let tmp = PlaceRef::alloca_addr_space(bx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bx); bx.store_arg_ty(&ret_ty, llval, tmp); let op = bx.load_operand(tmp); diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 85a663dacdcc5..719cc3d2080b5 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -277,7 +277,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); + let place = PlaceRef::alloca_addr_space(&mut bx, layout, &name.as_str()); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, @@ -305,7 +305,8 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( ); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) + LocalRef::Place(PlaceRef::alloca_addr_space(&mut bx, layout, + &format!("{:?}", local))) } } else { // If this is an immediate local, we do not create an @@ -468,7 +469,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca_addr_space(bx, bx.layout_of(arg_ty), &name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; @@ -559,7 +560,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( indirect_operand.store(bx, tmp); tmp } else { - let tmp = PlaceRef::alloca(bx, arg.layout, &name); + let tmp = PlaceRef::alloca_addr_space(bx, arg.layout, &name); bx.store_fn_arg(arg, &mut llarg_idx, tmp); tmp }; diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index bceafeec04ad7..820df095aafcc 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -74,7 +74,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); let ptr_layout = bx.cx().layout_of(ptr_ty); - Self::alloca(bx, ptr_layout, name) + Self::alloca_addr_space(bx, ptr_layout, name) } pub fn len<Cx: ConstMethods<'tcx, Value = V>>( diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 97cfb5854674b..093a2deb01c98 100644 ---
a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -18,6 +18,36 @@ use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; +fn codegen_binop_fixup<'a, 'tcx: 'a, Bx>(bx: &mut Bx, + lhs: Bx::Value, + rhs: Bx::Value) + -> (Bx::Value, Bx::Value) + where Bx: BuilderMethods<'a, 'tcx>, +{ + // The operands may be in separate address spaces; this can + // happen e.g. when comparing against `null_mut()`. + // LLVM's `infer-addr-spaces` pass should propagate the casts + // inserted here, but, empirically, it doesn't. + let fix_null_ty = |val, this_ty, other_ty| { + if bx.cx().const_null(this_ty) == val { + bx.cx().const_null(other_ty) + } else { + val + } + }; + let lhs_ty = bx.cx().val_ty(lhs); + let rhs_ty = bx.cx().val_ty(rhs); + let lhs = fix_null_ty(lhs, lhs_ty, rhs_ty); + let rhs = fix_null_ty(rhs, rhs_ty, lhs_ty); + if bx.cx().type_addr_space(lhs_ty).is_some() { + assert!(bx.cx().type_addr_space(rhs_ty).is_some()); + (bx.flat_addr_cast(lhs), + bx.flat_addr_cast(rhs)) + } else { + (lhs, rhs) + } +} + impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn codegen_rvalue( &mut self, @@ -63,7 +93,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // index into the struct, and this case isn't // important enough for it. debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + let scratch = PlaceRef::alloca_addr_space(&mut bx, operand.layout, + "__unsize_temp"); scratch.storage_live(&mut bx); operand.val.store(&mut bx, scratch); base::coerce_unsized_into(&mut bx, scratch, dest); @@ -89,7 +120,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } let zero = bx.cx().const_usize(0); let start = dest.project_index(&mut bx, zero).llval; - let start = bx.flat_addr_cast(start); if let OperandValue::Immediate(v) = cg_elem.val { let size = bx.cx().const_usize(dest.layout.size.bytes()); @@ -111,6 +141,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let count = bx.cx().const_usize(count); let end = dest.project_index(&mut bx, count).llval; + let start = bx.flat_addr_cast(start); let end = bx.flat_addr_cast(end); let mut header_bx = bx.build_sibling_block("repeat_loop_header"); @@ -245,7 +276,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, bx.cx().scalar_pair_element_backend_type(cast, 0, true)); - let lldata = bx.flat_addr_cast(lldata); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -618,17 +648,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { lhs, rhs ) } else { - // In case we're in separate addr spaces.
- // Can happen when cmp against null_mut, eg. - // `infer-addr-spaces` should propagate. - let lhs_addr = bx.flat_addr_cast(lhs_addr); - let rhs_addr = bx.flat_addr_cast(rhs_addr); - + let (lhs_addr, rhs_addr) = codegen_binop_fixup(bx, lhs_addr, rhs_addr); match op { mir::BinOp::Eq => { let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
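
As a sanity check, here is a minimal, standalone model of the
`codegen_binop_fixup` logic introduced above. Everything in it is
illustrative (plain `u32` address spaces, a made-up `FLAT` constant);
the real code operates on LLVM values through `BuilderMethods` and
`flat_addr_cast`s both operands whenever they are pointers, while this
model only widens when the spaces actually differ:

    // An operand is modelled as (address space, is-null).
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Val { addr_space: u32, is_null: bool }

    const FLAT: u32 = 0; // assumption: space 0 is the flat space

    fn fixup(lhs: Val, rhs: Val) -> (Val, Val) {
        // A null constant adopts the other operand's address space,
        // mirroring `fix_null_ty`: `p == null_mut()` should compare
        // within `p`'s space when possible.
        let fix_null = |v: Val, other: Val| {
            if v.is_null { Val { addr_space: other.addr_space, ..v } } else { v }
        };
        let (lhs, rhs) = (fix_null(lhs, rhs), fix_null(rhs, lhs));
        // Otherwise both sides are widened to the flat space, so the
        // comparison happens within a single address space.
        if lhs.addr_space == rhs.addr_space {
            (lhs, rhs)
        } else {
            (Val { addr_space: FLAT, ..lhs }, Val { addr_space: FLAT, ..rhs })
        }
    }

    fn main() {
        // An alloca-space (5) pointer compared against a flat (0) null:
        let p = Val { addr_space: 5, is_null: false };
        let null = Val { addr_space: 0, is_null: true };
        assert_eq!(fixup(p, null), (p, Val { addr_space: 5, is_null: true }));
    }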