Prefer GEP instructions over weird pointer casting
There are two places left where we used to know only the byte size
of/offset into an array and had to cast to i8 and back to get the right
addresses. By now, however, we always know the sizes in terms of the
number of elements in the array; in fact, we have to add an extra Mul
instruction just so we can keep using the weird cast-to-i8 code. So we
should embrace our new knowledge and use simple GEPs to do the address
calculations.
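
As a standalone illustration of the two addressing styles (not the compiler code itself, and written in present-day Rust), the sketch below contrasts byte-wise offsetting, which needs a round trip through a byte pointer, with element-wise offsetting, which is what a typed GEP does; the array and values are made up for the example.

fn main() {
    let xs = [10u32, 20, 30, 40];
    let base = xs.as_ptr();
    let offset = 2usize;

    // Byte-style addressing: cast to a byte pointer, scale the index by the
    // element size by hand, then cast back (the pattern this commit removes).
    let by_bytes = unsafe {
        (base as *const u8).add(offset * std::mem::size_of::<u32>()) as *const u32
    };

    // Element-style addressing: offset in units of elements and let the
    // pointer's type supply the scaling (what a GEP over u32 does).
    let by_elems = unsafe { base.add(offset) };

    assert_eq!(by_bytes, by_elems);
    assert_eq!(unsafe { *by_elems }, 30);
}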

Additionally, the pointer calculations in bind_subslice_pat don't handle
zero-sized types correctly, producing slices that point outside the
array that is being matched against. Using GEP fixes that as well.
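
And a minimal illustration of the zero-sized-type problem, again in present-day Rust rather than the trans code: every element of an array of () lives at the same address, so offsetting by elements never leaves the array, whereas the removed byte arithmetic used a stand-in element size of 1 (see nonzero_llsize_of below) and could step past it.

fn main() {
    let x = [(), ()];

    // A zero-sized element occupies no storage, so the whole array is 0 bytes
    // wide and every element address coincides with the start of the array.
    assert_eq!(std::mem::size_of_val(&x), 0);
    assert_eq!(&x[0] as *const (), &x[1] as *const ());

    // Element-wise offsetting (what a GEP does) scales by the true element
    // size of 0 and therefore stays at the base address.
    let base = x.as_ptr();
    assert_eq!(unsafe { base.add(1) }, base);
}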

Fixes #3729
dotdash committed Jan 13, 2015
1 parent e94a9f0 commit 56671cb
Showing 6 changed files with 25 additions and 44 deletions.
5 changes: 2 additions & 3 deletions src/librustc_trans/trans/_match.rs
@@ -201,7 +201,7 @@ use middle::pat_util::*;
use trans::adt;
use trans::base::*;
use trans::build::{AddCase, And, BitCast, Br, CondBr, GEPi, InBoundsGEP, Load};
-use trans::build::{Mul, Not, Store, Sub, add_comment};
+use trans::build::{Not, Store, Sub, add_comment};
use trans::build;
use trans::callee;
use trans::cleanup::{self, CleanupMethods};
@@ -630,8 +630,7 @@ fn bind_subslice_pat(bcx: Block,
let vec_datum = match_datum(val, vec_ty);
let (base, len) = vec_datum.get_vec_base_and_len(bcx);

-let slice_byte_offset = Mul(bcx, vt.llunit_size, C_uint(bcx.ccx(), offset_left));
-let slice_begin = tvec::pointer_add_byte(bcx, base, slice_byte_offset);
+let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
let slice_len = Sub(bcx, len, slice_len_offset);
let slice_ty = ty::mk_slice(bcx.tcx(),
9 changes: 0 additions & 9 deletions src/librustc_trans/trans/base.rs
@@ -546,15 +546,6 @@ pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
}

-// Structural comparison: a rather involved form of glue.
-pub fn maybe_name_value(cx: &CrateContext, v: ValueRef, s: &str) {
-if cx.sess().opts.cg.save_temps {
-let buf = CString::from_slice(s.as_bytes());
-unsafe { llvm::LLVMSetValueName(v, buf.as_ptr()) }
-}
-}


// Used only for creating scalar comparison glue.
#[derive(Copy)]
pub enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, }
1 change: 0 additions & 1 deletion src/librustc_trans/trans/expr.rs
@@ -768,7 +768,6 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
tvec::vec_types(bcx,
ty::sequence_element_type(bcx.tcx(),
base_datum.ty));
-base::maybe_name_value(bcx.ccx(), vt.llunit_size, "unit_sz");

let (base, len) = base_datum.get_vec_base_and_len(bcx);

11 changes: 0 additions & 11 deletions src/librustc_trans/trans/machine.rs
@@ -82,17 +82,6 @@ pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
return C_uint(cx, llsize_of_alloc(cx, ty));
}

-// Returns the "default" size of t (see above), or 1 if the size would
-// be zero. This is important for things like vectors that expect
-// space to be consumed.
-pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
-if llbitsize_of_real(cx, ty) == 0 {
-unsafe { llvm::LLVMConstInt(cx.int_type().to_ref(), 1, False) }
-} else {
-llsize_of(cx, ty)
-}
-}

// Returns the preferred alignment of the given type for the current target.
// The preferred alignment may be larger than the alignment used when
// packing the type into structs. This will be used for things like
24 changes: 4 additions & 20 deletions src/librustc_trans/trans/tvec.rs
@@ -25,7 +25,7 @@ use trans::expr::{Dest, Ignore, SaveIn};
use trans::expr;
use trans::glue;
use trans::machine;
-use trans::machine::{nonzero_llsize_of, llsize_of_alloc};
+use trans::machine::llsize_of_alloc;
use trans::type_::Type;
use trans::type_of;
use middle::ty::{self, Ty};
@@ -44,13 +44,6 @@ fn get_dataptr(bcx: Block, vptr: ValueRef) -> ValueRef {
Load(bcx, expr::get_dataptr(bcx, vptr))
}

-pub fn pointer_add_byte(bcx: Block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
-let _icx = push_ctxt("tvec::pointer_add_byte");
-let old_ty = val_ty(ptr);
-let bptr = PointerCast(bcx, ptr, Type::i8p(bcx.ccx()));
-return PointerCast(bcx, InBoundsGEP(bcx, bptr, &[bytes]), old_ty);
-}

pub fn make_drop_glue_unboxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
vptr: ValueRef,
unit_ty: Ty<'tcx>,
@@ -94,17 +87,14 @@ pub fn make_drop_glue_unboxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub struct VecTypes<'tcx> {
pub unit_ty: Ty<'tcx>,
pub llunit_ty: Type,
-pub llunit_size: ValueRef,
pub llunit_alloc_size: u64
}

impl<'tcx> VecTypes<'tcx> {
pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
format!("VecTypes {{unit_ty={}, llunit_ty={}, \
llunit_size={}, llunit_alloc_size={}}}",
format!("VecTypes {{unit_ty={}, llunit_ty={}, llunit_alloc_size={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
ccx.tn().type_to_string(self.llunit_ty),
-ccx.tn().val_to_string(self.llunit_size),
self.llunit_alloc_size)
}
}
@@ -333,13 +323,11 @@ pub fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-> VecTypes<'tcx> {
let ccx = bcx.ccx();
let llunit_ty = type_of::type_of(ccx, unit_ty);
-let llunit_size = nonzero_llsize_of(ccx, llunit_ty);
let llunit_alloc_size = llsize_of_alloc(ccx, llunit_ty);

VecTypes {
unit_ty: unit_ty,
llunit_ty: llunit_ty,
-llunit_size: llunit_size,
llunit_alloc_size: llunit_alloc_size
}
}
@@ -486,17 +474,13 @@ pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
let fcx = bcx.fcx;

let vt = vec_types(bcx, unit_ty);
-let fill = Mul(bcx, len, vt.llunit_size);

if vt.llunit_alloc_size == 0 {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
-iter_vec_loop(bcx, data_ptr, &vt, fill, f)
+iter_vec_loop(bcx, data_ptr, &vt, len, f)
} else {
// Calculate the last pointer address we want to handle.
-// FIXME (#3729): Optimize this when the size of the unit type is
-// statically known to not use pointer casts, which tend to confuse
-// LLVM.
-let data_end_ptr = pointer_add_byte(bcx, data_ptr, fill);
+let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);

// Now perform the iteration.
let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
19 changes: 19 additions & 0 deletions src/test/run-pass/zero_sized_subslice_match.rs
@@ -0,0 +1,19 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

fn main() {
let x = [(), ()];

// The subslice used to go out of bounds for zero-sized array items, check that this doesn't
// happen anymore
match x {
[_, y..] => assert_eq!(&x[1] as *const _, &y[0] as *const _)
}
}
