Auto merge of #86873 - nikic:opaque-ptrs, r=nagisa
Improve opaque pointers support

Opaque pointers are coming, and rustc is not ready.

This adds partial support by passing an explicit load type to LLVM; a short toy sketch of the new typed-load shape follows these notes. Two issues I've encountered:
 * The necessary type was not available at the point where non-temporal copies were generated. I've hoisted that code out of the memcpy implementation and moved a cast so it can work with the types available there. (I'm not sure the cast is needed at all, but I've kept it to be conservative.)
 * The `PlaceRef::project_deref()` function used during debuginfo generation seems to be buggy in some way, though I haven't pinned down exactly what it does wrong. Replacing it with `load_operand().deref()` did the trick, but I don't really know what I'm doing here.
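To make the shape of the change concrete, here is a minimal, self-contained Rust sketch. It is not rustc or LLVM code: `Builder`, `Type`, and `Value` below are made-up stand-ins, and the "IR" strings are only illustrative. The point it models is that once pointers are opaque, a load can no longer recover its result type from the pointer operand, so the caller must supply it.

```rust
// Toy stand-ins for LLVM's Type/Value and rustc's codegen builder; none of
// these are real rustc or LLVM APIs.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Type {
    I32,
    Ptr, // an opaque pointer type: it no longer records a pointee type
}

#[derive(Debug)]
struct Value {
    ty: Type,
    name: String,
}

struct Builder {
    insts: Vec<String>,
}

impl Builder {
    // Before: `load(ptr, align)` recovered the loaded type from the pointer's
    // element type. With opaque pointers that information is gone, so the
    // caller passes it explicitly, mirroring `BuilderMethods::load(ty, ptr, align)`
    // in the diff below.
    fn load(&mut self, ty: Type, ptr: &Value, align: u64) -> Value {
        assert_eq!(ptr.ty, Type::Ptr, "load expects a pointer operand");
        self.insts
            .push(format!("%t = load {:?}, ptr %{}, align {}", ty, ptr.name, align));
        Value { ty, name: "t".to_string() }
    }
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    let slot = Value { ty: Type::Ptr, name: "slot".to_string() };
    // The loaded type (i32 here) comes from layout information on the Rust
    // side, not from inspecting the pointer's LLVM type.
    let loaded = bx.load(Type::I32, &slot, 4);
    println!("loaded: {:?}", loaded);
    println!("emitted:\n{}", bx.insts.join("\n"));
}
```

The real change threads exactly this extra type parameter through `BuilderMethods::load`, `volatile_load`, and `atomic_load`, and switches the LLVM FFI from `LLVMBuildLoad` to the explicitly typed `LLVMBuildLoad2`.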
bors committed Jul 10, 2021
2 parents a31431f + 2ce1add commit 432e145
Showing 13 changed files with 81 additions and 74 deletions.
31 changes: 11 additions & 20 deletions compiler/rustc_codegen_llvm/src/builder.rs
@@ -410,31 +410,33 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}

- fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+ fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
unsafe {
- let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+ let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
load
}
}

- fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+ fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
unsafe {
- let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+ let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
llvm::LLVMSetVolatile(load, llvm::True);
load
}
}

fn atomic_load(
&mut self,
+ ty: &'ll Type,
ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
size: Size,
) -> &'ll Value {
unsafe {
let load = llvm::LLVMRustBuildAtomicLoad(
self.llbuilder,
+ ty,
ptr,
UNNAMED,
AtomicOrdering::from_generic(order),
@@ -486,7 +488,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
let llval = const_llval.unwrap_or_else(|| {
- let load = self.load(place.llval, place.align);
+ let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
scalar_load_metadata(self, load, scalar);
}
@@ -498,7 +500,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

let mut load = |i, scalar: &abi::Scalar, align| {
let llptr = self.struct_gep(place.llval, i as u64);
- let load = self.load(llptr, align);
+ let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+ let load = self.load(llty, llptr, align);
scalar_load_metadata(self, load, scalar);
self.to_immediate_scalar(load, scalar)
};
@@ -815,13 +818,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
size: &'ll Value,
flags: MemFlags,
) {
- if flags.contains(MemFlags::NONTEMPORAL) {
- // HACK(nox): This is inefficient but there is no nontemporal memcpy.
- let val = self.load(src, src_align);
- let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
- self.store_with_flags(val, ptr, dst_align, flags);
- return;
- }
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_i8p());
@@ -848,13 +845,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
size: &'ll Value,
flags: MemFlags,
) {
- if flags.contains(MemFlags::NONTEMPORAL) {
- // HACK(nox): This is inefficient but there is no nontemporal memmove.
- let val = self.load(src, src_align);
- let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
- self.store_with_flags(val, ptr, dst_align, flags);
- return;
- }
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
let size = self.intcast(size, self.type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_i8p());
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -20,7 +20,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_,
// LLVM to keep around the reference to the global.
let indices = [bx.const_i32(0), bx.const_i32(0)];
let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
- let volative_load_instruction = bx.volatile_load(element);
+ let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
unsafe {
llvm::LLVMSetAlignment(volative_load_instruction, 1);
}
19 changes: 11 additions & 8 deletions compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -162,11 +162,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {

sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = substs.type_at(0);
- let mut ptr = args[0].immediate();
- if let PassMode::Cast(ty) = fn_abi.ret.mode {
- ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
- }
- let load = self.volatile_load(ptr);
+ let ptr = args[0].immediate();
+ let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let llty = ty.llvm_type(self);
+ let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+ self.volatile_load(llty, ptr)
+ } else {
+ self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+ };
let align = if name == sym::unaligned_volatile_load {
1
} else {
@@ -319,9 +322,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
let integer_ty = self.type_ix(layout.size.bits());
let ptr_ty = self.type_ptr_to(integer_ty);
let a_ptr = self.bitcast(a, ptr_ty);
- let a_val = self.load(a_ptr, layout.align.abi);
+ let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
let b_ptr = self.bitcast(b, ptr_ty);
- let b_val = self.load(b_ptr, layout.align.abi);
+ let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
self.icmp(IntPredicate::IntEQ, a_val, b_val)
} else {
let i8p_ty = self.type_i8p();
@@ -540,7 +543,7 @@ fn codegen_msvc_try(
// Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
let flags = bx.const_i32(8);
let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
- let ptr = catchpad_rust.load(slot, ptr_align);
+ let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
catchpad_rust.catch_ret(&funclet, caught.llbb());

8 changes: 7 additions & 1 deletion compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1385,7 +1385,12 @@ extern "C" {
Val: &'a Value,
Name: *const c_char,
) -> &'a Value;
- pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildLoad2(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ PointerVal: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;

pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;

@@ -1631,6 +1636,7 @@ extern "C" {
// Atomic Operations
pub fn LLVMRustBuildAtomicLoad(
B: &Builder<'a>,
+ ElementType: &'a Type,
PointerVal: &'a Value,
Name: *const c_char,
Order: AtomicOrdering,
21 changes: 12 additions & 9 deletions compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -32,14 +32,15 @@ fn emit_direct_ptr_va_arg(
slot_size: Align,
allow_higher_align: bool,
) -> (&'ll Value, Align) {
- let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+ let va_list_ty = bx.type_i8p();
+ let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
bx.bitcast(list.immediate(), va_list_ptr_ty)
} else {
list.immediate()
};

- let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+ let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

let (addr, addr_align) = if allow_higher_align && align > slot_size {
(round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
@@ -82,10 +83,10 @@ fn emit_ptr_va_arg(
let (addr, addr_align) =
emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
if indirect {
- let tmp_ret = bx.load(addr, addr_align);
- bx.load(tmp_ret, align.abi)
+ let tmp_ret = bx.load(llty, addr, addr_align);
+ bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
} else {
- bx.load(addr, addr_align)
+ bx.load(llty, addr, addr_align)
}
}

@@ -118,7 +119,7 @@ fn emit_aapcs_va_arg(
};

// if the offset >= 0 then the value will be on the stack
- let mut reg_off_v = bx.load(reg_off, offset_align);
+ let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());

@@ -139,8 +140,9 @@
let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());

+ let top_type = bx.type_i8p();
let top = in_reg.struct_gep(va_list_addr, reg_top_index);
- let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+ let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);

// reg_value = *(@top + reg_off_v);
let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
@@ -149,8 +151,9 @@
let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
reg_addr = in_reg.gep(reg_addr, &[offset]);
}
- let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
- let reg_value = in_reg.load(reg_addr, layout.align.abi);
+ let reg_type = layout.llvm_type(bx);
+ let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+ let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
in_reg.br(&end.llbb());

// On Stack block
10 changes: 6 additions & 4 deletions compiler/rustc_codegen_ssa/src/meth.rs
@@ -20,10 +20,11 @@ impl<'a, 'tcx> VirtualIndex {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", llvtable, self);

- let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+ let llty = bx.fn_ptr_backend_type(fn_abi);
+ let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
- let ptr = bx.load(gep, ptr_align);
+ let ptr = bx.load(llty, gep, ptr_align);
bx.nonnull_metadata(ptr);
// Vtable loads are invariant.
bx.set_invariant_load(ptr);
@@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex {
// Load the data pointer from the object.
debug!("get_int({:?}, {:?})", llvtable, self);

- let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+ let llty = bx.type_isize();
+ let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
let usize_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
- let ptr = bx.load(gep, usize_align);
+ let ptr = bx.load(llty, gep, usize_align);
// Vtable loads are invariant.
bx.set_invariant_load(ptr);
ptr
14 changes: 8 additions & 6 deletions compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -260,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
PassMode::Direct(_) | PassMode::Pair(..) => {
let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
if let Ref(llval, _, align) = op.val {
- bx.load(llval, align)
+ bx.load(bx.backend_type(op.layout), llval, align)
} else {
op.immediate_or_packed_pair(&mut bx)
}
@@ -287,8 +287,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llval
}
};
- let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
- bx.load(addr, self.fn_abi.ret.layout.align.abi)
+ let ty = bx.cast_backend_type(&cast_ty);
+ let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+ bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
}
};
bx.ret(llval);
@@ -1086,15 +1087,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty) = arg.mode {
- let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
- llval = bx.load(addr, align.min(arg.layout.align.abi));
+ let llty = bx.cast_backend_type(&ty);
+ let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+ llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
// used for this call is passing it by-value. In that case,
// the load would just produce `OperandValue::Ref` instead
// of the `OperandValue::Immediate` we need for the call.
- llval = bx.load(llval, align);
+ llval = bx.load(bx.backend_type(arg.layout), llval, align);
if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
if scalar.is_bool() {
bx.range_metadata(llval, 0..2);
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -274,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match *elem {
mir::ProjectionElem::Deref => {
indirect_offsets.push(Size::ZERO);
- place = place.project_deref(bx);
+ place = bx.load_operand(place).deref(bx.cx());
}
mir::ProjectionElem::Field(field, _) => {
let i = field.index();
9 changes: 4 additions & 5 deletions compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first...
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ let llty = bx.type_isize();
+ let ptr_llty = bx.type_ptr_to(llty);
source = bx.pointercast(source, ptr_llty);
- }
- let result = bx.atomic_load(source, order, size);
- if ty.is_unsafe_ptr() {
+ let result = bx.atomic_load(llty, source, order, size);
// ... and then cast the result back to a pointer
bx.inttoptr(result, bx.backend_type(layout))
} else {
- result
+ bx.atomic_load(bx.backend_type(layout), source, order, size)
}
} else {
return invalid_monomorphization(ty);
8 changes: 8 additions & 0 deletions compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -289,6 +289,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
}
match self {
OperandValue::Ref(r, None, source_align) => {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+ let ty = bx.backend_type(dest.layout);
+ let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+ let val = bx.load(ty, ptr, source_align);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ return;
+ }
base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
}
OperandValue::Ref(_, Some(_), _) => {
12 changes: 0 additions & 12 deletions compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -402,18 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
downcast
}

- pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
- let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
- let layout = bx.layout_of(target_ty.ty);
-
- PlaceRef {
- llval: bx.load(self.llval, self.align),
- llextra: None,
- layout,
- align: layout.align.abi,
- }
- }

pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
bx.lifetime_start(self.llval, self.layout.size);
}
12 changes: 9 additions & 3 deletions compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -137,9 +137,15 @@ pub trait BuilderMethods<'a, 'tcx>:
fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;

- fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
- fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
- fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+ fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+ fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+ fn atomic_load(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ ) -> Self::Value;
fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
-> OperandRef<'tcx, Self::Value>;

7 changes: 3 additions & 4 deletions compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -349,11 +349,10 @@ extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
}

extern "C" LLVMValueRef
- LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
- LLVMAtomicOrdering Order) {
+ LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
+ const char *Name, LLVMAtomicOrdering Order) {
Value *Ptr = unwrap(Source);
- Type *Ty = Ptr->getType()->getPointerElementType();
- LoadInst *LI = unwrap(B)->CreateLoad(Ty, Ptr, Name);
+ LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
LI->setAtomic(fromRust(Order));
return wrap(LI);
}
