@@ -130,6 +130,7 @@ functions.
use prelude::*;
use i16;
use num::Float;
use num::{ConvertSign, Widen};
use slice::bytes;
pub use self::decoder::{decode, DecodableFloat, FullDecoded, Decoded};

@@ -287,18 +288,20 @@ fn digits_to_dec_str<'a>(buf: &'a [u8], exp: i16, frac_digits: usize,

if exp <= 0 {
// the decimal point is before rendered digits: [0.][000...000][1234][____]
let minus_exp = -(exp as i32) as usize;
let minus_exp = -(exp as i32);
parts[0] = Part::Copy(b"0.");
parts[1] = Part::Zero(minus_exp);
parts[1] = Part::Zero(minus_exp.as_unsigned().widen());
parts[2] = Part::Copy(buf);
if frac_digits > buf.len() && frac_digits - buf.len() > minus_exp {
parts[3] = Part::Zero((frac_digits - buf.len()) - minus_exp);
if frac_digits > buf.len() && frac_digits - buf.len()
> minus_exp.as_unsigned().widen() {
parts[3] = Part::Zero((frac_digits - buf.len())
- minus_exp.as_unsigned().widen_(0usize));
&parts[..4]
} else {
&parts[..3]
}
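// (worked example, as a sketch: `buf = b"1234"`, `exp = -2`, `frac_digits = 8`
// yields `"0."`, `Zero(2)`, `Copy(b"1234")`, `Zero(2)`, i.e. "0.00123400".)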
} else {
let exp = exp as usize;
let exp = exp.as_unsigned().widen_(0usize);
if exp < buf.len() {
// the decimal point is inside rendered digits: [12][.][34][____]
parts[0] = Part::Copy(&buf[..exp]);
@@ -528,7 +531,7 @@ pub fn to_shortest_exp_str<'a, T, F>(mut format_shortest: F, v: T,
/// 826 bytes of buffer should be sufficient for `f64`. Compare this with
/// the actual number for the worst case: 770 bytes (when `exp = -1074`).
fn estimate_max_buf_len(exp: i16) -> usize {
21 + ((if exp < 0 { -12 } else { 5 } * exp as i32) as usize >> 4)
21 + ((if exp < 0 { -12 } else { 5 } * exp as i32).as_unsigned().widen_(0usize) >> 4)
}
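// (sanity check of the bound above: for `exp = -1074`, `-12 * -1074 = 12888`
// and `21 + (12888 >> 4) = 21 + 805 = 826`, matching the doc comment's figure.)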

/// Formats given floating point number into the exponential form with
@@ -17,6 +17,7 @@ Almost direct (but slightly optimized) Rust translation of Figure 3 of [1].

use prelude::*;
use num::Float;
use num::{ConvertSign, Widen};
use cmp::Ordering;

use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up};
@@ -114,20 +115,20 @@ pub fn format_shortest(d: &Decoded, buf: &mut [u8]) -> (/*#digits*/ usize, /*exp
let mut plus = Big::from_u64(d.plus);
let mut scale = Big::from_small(1);
if d.exp < 0 {
scale.mul_pow2(-d.exp as usize);
scale.mul_pow2((-d.exp).as_unsigned().widen());
} else {
mant.mul_pow2(d.exp as usize);
minus.mul_pow2(d.exp as usize);
plus.mul_pow2(d.exp as usize);
mant.mul_pow2(d.exp.as_unsigned().widen());
minus.mul_pow2(d.exp.as_unsigned().widen());
plus.mul_pow2(d.exp.as_unsigned().widen());
}

// divide `mant` by `10^k`. now `scale / 10 < mant + plus <= scale * 10`.
if k >= 0 {
mul_pow10(&mut scale, k as usize);
mul_pow10(&mut scale, k.as_unsigned().widen());
} else {
mul_pow10(&mut mant, -k as usize);
mul_pow10(&mut minus, -k as usize);
mul_pow10(&mut plus, -k as usize);
mul_pow10(&mut mant, (-k).as_unsigned().widen());
mul_pow10(&mut minus, (-k).as_unsigned().widen());
mul_pow10(&mut plus, (-k).as_unsigned().widen());
}

// fixup when `mant + plus > scale` (or `>=`).
@@ -242,16 +243,16 @@ pub fn format_exact(d: &Decoded, buf: &mut [u8], limit: i16) -> (/*#digits*/ usi
let mut mant = Big::from_u64(d.mant);
let mut scale = Big::from_small(1);
if d.exp < 0 {
scale.mul_pow2(-d.exp as usize);
scale.mul_pow2((-d.exp).as_unsigned().widen());
} else {
mant.mul_pow2(d.exp as usize);
mant.mul_pow2(d.exp.as_unsigned().widen());
}

// divide `mant` by `10^k`. now `scale / 10 < mant <= scale * 10`.
if k >= 0 {
mul_pow10(&mut scale, k as usize);
mul_pow10(&mut scale, k.as_unsigned().widen());
} else {
mul_pow10(&mut mant, -k as usize);
mul_pow10(&mut mant, (-k).as_unsigned().widen());
}

// fixup when `mant + plus >= scale`, where `plus / scale = 10^-buf.len() / 2`.
@@ -274,8 +275,8 @@ pub fn format_exact(d: &Decoded, buf: &mut [u8], limit: i16) -> (/*#digits*/ usi
// we return an empty buffer, with the exception of the later rounding-up case
// which occurs when `k == limit` and has to produce exactly one digit.
0
} else if ((k as i32 - limit as i32) as usize) < buf.len() {
(k - limit) as usize
} else if (k as i32 - limit as i32).as_unsigned().widen_(0usize) < buf.len() {
(k - limit).as_unsigned().widen()
} else {
buf.len()
};
@@ -18,6 +18,7 @@ Rust adaptation of Grisu3 algorithm described in [1]. It uses about

use prelude::*;
use num::Float;
use num::{ConvertSign, Widen};

use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up};

@@ -68,7 +69,7 @@ impl Fp {
fn normalize_to(&self, e: i16) -> Fp {
let edelta = self.e - e;
assert!(edelta >= 0);
let edelta = edelta as usize;
let edelta = edelta;
assert_eq!(self.f << edelta >> edelta, self.f);
Fp { f: self.f << edelta, e: e }
}
@@ -182,7 +183,7 @@ pub fn cached_power(alpha: i16, gamma: i16) -> (i16, Fp) {
let range = (CACHED_POW10.len() as i32) - 1;
let domain = (CACHED_POW10_LAST_E - CACHED_POW10_FIRST_E) as i32;
let idx = ((gamma as i32) - offset) * range / domain;
let (f, e, k) = CACHED_POW10[idx as usize];
let (f, e, k) = CACHED_POW10[idx.as_unsigned().widen_(0usize)];
debug_assert!(alpha <= e && e <= gamma);
(k, Fp { f: f, e: e })
}
@@ -276,7 +277,7 @@ pub fn format_shortest_opt(d: &Decoded,
// let plus0 = plus.f - 1; // only for explanation
// let minus0 = minus.f + 1; // only for explanation
let minus1 = minus.f - 1;
let e = -plus.e as usize; // shared exponent
let e = -plus.e; // shared exponent

// divide `plus1` into integral and fractional parts.
// integral parts are guaranteed to fit in u32, since cached power guarantees `plus < 2^32`
@@ -300,7 +301,7 @@ pub fn format_shortest_opt(d: &Decoded,
// (e.g. `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777 <= y - x = 777`.)
// the algorithm relies on the later verification phase to exclude `y`.
let delta1 = plus1 - minus1;
// let delta1int = (delta1 >> e) as usize; // only for explanation
// let delta1int = (delta1 >> e); // only for explanation
let delta1frac = delta1 & ((1 << e) - 1);

// render integral parts, while checking for the accuracy at each step.
@@ -329,7 +330,7 @@ pub fn format_shortest_opt(d: &Decoded,

// break the loop when we have rendered all integral digits.
// the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
if i > max_kappa as usize {
if i > max_kappa.widen() {
debug_assert_eq!(ten_kappa, 1);
debug_assert_eq!(kappa, 0);
break;
@@ -505,7 +506,7 @@ pub fn format_exact_opt(d: &Decoded, buf: &mut [u8], limit: i16)
let v = v.mul(&cached);

// divide `v` into integral and fractional parts.
let e = -v.e as usize;
let e = -v.e;
let vint = (v.f >> e) as u32;
let vfrac = v.f & ((1 << e) - 1);

@@ -542,8 +543,8 @@ pub fn format_exact_opt(d: &Decoded, buf: &mut [u8], limit: i16)
// this will increase the false negative rate, but only very, *very* slightly;
// it can only matter noticeably when the mantissa is bigger than 60 bits.
return possibly_round(buf, 0, exp, limit, v.f / 10, (max_ten_kappa as u64) << e, err << e);
} else if ((exp as i32 - limit as i32) as usize) < buf.len() {
(exp - limit) as usize
} else if (exp as i32 - limit as i32).as_unsigned().widen_(0usize) < buf.len() {
(exp - limit).as_unsigned().widen()
} else {
buf.len()
};
@@ -575,7 +576,7 @@ pub fn format_exact_opt(d: &Decoded, buf: &mut [u8], limit: i16)

// break the loop when we have rendered all integral digits.
// the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
if i > max_kappa as usize {
if i > max_kappa.widen() {
debug_assert_eq!(ten_kappa, 1);
debug_assert_eq!(kappa, 0);
break;
@@ -19,7 +19,7 @@ use char::CharExt;
use cmp::{Eq, PartialOrd};
use fmt;
use intrinsics;
use marker::Copy;
use marker::{Copy, Sized};
use mem::size_of;
use option::Option::{self, Some, None};
use result::Result::{self, Ok, Err};
@@ -1142,7 +1142,7 @@ macro_rules! uint_impl {
pub fn next_power_of_two(self) -> Self {
let bits = size_of::<Self>() * 8;
let one: Self = Self::one();
one << ((bits - self.wrapping_sub(one).leading_zeros() as usize) % bits)
one << ((bits - self.wrapping_sub(one).leading_zeros().widen_(0usize)) % bits)
}
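// (worked example, as a sketch: for `5u8`, `bits = 8` and `4u8.leading_zeros() = 5`,
// so the shift is `(8 - 5) % 8 = 3` and the result is `1 << 3 = 8`; the `% bits`
// keeps the shift legal for `self == 0`, where it would otherwise equal `bits`.)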

/// Returns the smallest power of two greater than or equal to `n`. If
@@ -1560,3 +1560,120 @@ impl fmt::Display for ParseFloatError {
self.__description().fmt(f)
}
}

pub trait Widen<Target>: Sized {
#[stable(feature = "rust1", since = "1.0.0")]
fn widen(self) -> Target;
#[stable(feature = "rust1", since = "1.0.0")]
fn widen_(self, _: Target) -> Target {
self.widen()
}
}
pub trait Truncate<Target>: Sized {
#[stable(feature = "rust1", since = "1.0.0")]
fn truncate(self) -> Target;
#[stable(feature = "rust1", since = "1.0.0")]
fn truncate_(self, _: Target) -> Target {
self.truncate()
}
}

pub trait ConvertSign: Sized {
type TargetSigned;
type TargetUnsigned;
#[stable(feature = "rust1", since = "1.0.0")]
fn as_signed(self) -> Self::TargetSigned;
#[stable(feature = "rust1", since = "1.0.0")]
fn as_unsigned(self) -> Self::TargetUnsigned;
}
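// NB: the `_`-suffixed variants (`widen_`, `truncate_`) take an otherwise
// unused witness argument purely to pin down `Target` where inference has no
// other anchor, as in `x.widen_(0usize)` when the result feeds an index.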

macro_rules! impl_conv {
($Small: ty, $Large: ty) => {
impl Widen<$Large> for $Small {
#[inline]
fn widen(self) -> $Large {
self as $Large
}
}
impl Truncate<$Small> for $Large {
#[inline]
fn truncate(self) -> $Small {
self as $Small
}
}
}
}

macro_rules! impl_conv_sign {
($Signed: ty, $Unsigned: ty) => {
impl ConvertSign for $Signed {
type TargetSigned = $Signed;
type TargetUnsigned = $Unsigned;
#[inline]
fn as_signed(self) -> Self::TargetSigned {
self as $Signed
}
#[inline]
fn as_unsigned(self) -> Self::TargetUnsigned {
self as $Unsigned
}
}
impl ConvertSign for $Unsigned {
type TargetSigned = $Signed;
type TargetUnsigned = $Unsigned;
#[inline]
fn as_signed(self) -> Self::TargetSigned {
self as $Signed
}
#[inline]
fn as_unsigned(self) -> Self::TargetUnsigned {
self as $Unsigned
}
}
}
}

impl_conv! { u8, u8 }
impl_conv! { u8, u16 }
impl_conv! { u8, u32 }
impl_conv! { u8, u64 }
impl_conv! { u8, usize }
impl_conv! { u16, u16 }
impl_conv! { u16, u32 }
impl_conv! { u16, u64 }
impl_conv! { u16, usize }
impl_conv! { u32, u32 }
impl_conv! { u32, u64 }
impl_conv! { u32, usize }
impl_conv! { u64, u64 }
#[cfg(target_pointer_width = "64")]
impl_conv! { u64, usize }
#[cfg(target_pointer_width = "32")]
impl_conv! { usize, u32 }
impl_conv! { usize, u64 }
impl_conv! { usize, usize }
impl_conv! { i8, i8 }
impl_conv! { i8, i16 }
impl_conv! { i8, i32 }
impl_conv! { i8, i64 }
impl_conv! { i8, isize }
impl_conv! { i16, i16 }
impl_conv! { i16, i32 }
impl_conv! { i16, i64 }
impl_conv! { i16, isize }
impl_conv! { i32, i32 }
impl_conv! { i32, i64 }
impl_conv! { i32, isize }
impl_conv! { i64, i64 }
#[cfg(target_pointer_width = "64")]
impl_conv! { i64, isize }
#[cfg(target_pointer_width = "32")]
impl_conv! { isize, i32 }
impl_conv! { isize, i64 }
impl_conv! { isize, isize }

impl_conv_sign! { i8, u8 }
impl_conv_sign! { i16, u16 }
impl_conv_sign! { i32, u32 }
impl_conv_sign! { i64, u64 }
impl_conv_sign! { isize, usize }
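// A minimal usage sketch of the traits above (illustrative only, not part of
// the patch proper; `x`, `w`, `i`, `s`, `u` are made-up names):
//
//     let x: u16 = 1000;
//     let w: u32 = x.widen();           // lossless widening to the annotated type
//     let i = x.widen_(0usize);         // target type picked by the witness value
//     let s: u8 = 300u16.truncate();    // explicit, possibly lossy narrowing (here 44)
//     let u = (-1i32).as_unsigned();    // bit-preserving cast: u32 == 0xffff_ffff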
@@ -12,6 +12,7 @@
#![allow(deprecated)]

use super::Wrapping;
use num::Widen;

use ops::*;

@@ -298,47 +299,47 @@ impl OverflowingOps for usize {
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_add_with_overflow(self as u64, rhs as u64);
(res.0 as usize, res.1)
(res.0.widen(), res.1)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_sub_with_overflow(self as u64, rhs as u64);
(res.0 as usize, res.1)
(res.0.widen(), res.1)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u64_mul_with_overflow(self as u64, rhs as u64);
(res.0 as usize, res.1)
(res.0.widen(), res.1)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: usize) -> (usize, bool) {
let (r, f) = (self as u64).overflowing_div(rhs as u64);
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_rem(self, rhs: usize) -> (usize, bool) {
let (r, f) = (self as u64).overflowing_rem(rhs as u64);
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_neg(self) -> (usize, bool) {
let (r, f) = (self as u64).overflowing_neg();
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_shl(self, rhs: u32) -> (usize, bool) {
let (r, f) = (self as u64).overflowing_shl(rhs);
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_shr(self, rhs: u32) -> (usize, bool) {
let (r, f) = (self as u64).overflowing_shr(rhs);
(r as usize, f)
(r.widen(), f)
}
}

@@ -348,47 +349,47 @@ impl OverflowingOps for usize {
fn overflowing_add(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_add_with_overflow(self as u32, rhs as u32);
(res.0 as usize, res.1)
(res.0.widen(), res.1)
}
}
#[inline(always)]
fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_sub_with_overflow(self as u32, rhs as u32);
(res.0 as usize, res.1)
(res.0.widen(), res.1)
}
}
#[inline(always)]
fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
unsafe {
let res = u32_mul_with_overflow(self as u32, rhs as u32);
(res.0 as usize, res.1)
(res.0.widen(), res.1)
}
}
#[inline(always)]
fn overflowing_div(self, rhs: usize) -> (usize, bool) {
let (r, f) = (self as u32).overflowing_div(rhs as u32);
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_rem(self, rhs: usize) -> (usize, bool) {
let (r, f) = (self as u32).overflowing_rem(rhs as u32);
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_neg(self) -> (usize, bool) {
let (r, f) = (self as u32).overflowing_neg();
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_shl(self, rhs: u32) -> (usize, bool) {
let (r, f) = (self as u32).overflowing_shl(rhs);
(r as usize, f)
(r.widen(), f)
}
#[inline(always)]
fn overflowing_shr(self, rhs: u32) -> (usize, bool) {
let (r, f) = (self as u32).overflowing_shr(rhs);
(r as usize, f)
(r.widen(), f)
}
}

@@ -33,6 +33,7 @@ use raw::{Repr, Slice};
use result::Result::{self, Ok, Err};
use slice::{self, SliceExt};
use usize;
use num::Widen;

pub mod pattern;

@@ -969,7 +970,7 @@ impl TwoWaySearcher {

// This isn't in the original algorithm, as far as I'm aware.
let byteset = needle.iter()
.fold(0, |a, &b| (1 << ((b & 0x3f) as usize)) | a);
.fold(0, |a, &b| (1 << (b & 0x3f)) | a);
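// (sketch of the idea: each needle byte sets bit `b & 0x3f` of a 64-bit
// mask, so a haystack byte whose low six bits land on a clear bit cannot
// occur anywhere in the needle, letting the search below skip a whole
// needle length at a time.)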

// A particularly readable explanation of what's going on here can be found
// in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
@@ -1018,7 +1019,7 @@ impl TwoWaySearcher {
// Quickly skip by large portions unrelated to our substring
if (self.byteset >>
((haystack[self.position + needle.len() - 1] & 0x3f)
as usize)) & 1 == 0 {
)) & 1 == 0 {
self.position += needle.len();
if !long_period {
self.memory = 0;
@@ -1236,7 +1237,7 @@ fn run_utf8_validation_iterator(iter: &mut slice::Iter<u8>)
// ASCII characters are always valid, so only large
// bytes need more examination.
if first >= 128 {
let w = UTF8_CHAR_WIDTH[first as usize];
let w = UTF8_CHAR_WIDTH[first.widen_(0usize)];
let second = next!();
// 2-byte encoding is for codepoints \u{0080} to \u{07ff}
// first C2 80 last DF BF
@@ -1766,7 +1767,7 @@ impl StrExt for str {
}

let first= s.as_bytes()[i];
let w = UTF8_CHAR_WIDTH[first as usize];
let w = UTF8_CHAR_WIDTH[first.widen_(0usize)];
assert!(w != 0);

let mut val = utf8_first_byte(first, w as u32);
@@ -1866,15 +1867,15 @@ pub fn char_range_at_raw(bytes: &[u8], i: usize) -> (u32, usize) {
// Multibyte case is a fn to allow char_range_at to inline cleanly
fn multibyte_char_range_at(bytes: &[u8], i: usize) -> (u32, usize) {
let first = bytes[i];
let w = UTF8_CHAR_WIDTH[first as usize];
let w = UTF8_CHAR_WIDTH[first.widen_(0usize)];
assert!(w != 0);

let mut val = utf8_first_byte(first, w as u32);
val = utf8_acc_cont_byte(val, bytes[i + 1]);
if w > 2 { val = utf8_acc_cont_byte(val, bytes[i + 2]); }
if w > 3 { val = utf8_acc_cont_byte(val, bytes[i + 3]); }

return (val, i + w as usize);
return (val, i + w.widen_(0usize));
}

multibyte_char_range_at(bytes, i)
@@ -947,7 +947,7 @@ fn test_range() {
assert_eq!((-70..58i8).size_hint(), (128, Some(128)));
assert_eq!((-128..127i8).size_hint(), (255, Some(255)));
assert_eq!((-2..isize::MAX).size_hint(),
(isize::MAX as usize + 2, Some(isize::MAX as usize + 2)));
(isize::MAX.as_unsigned() + 2, Some(isize::MAX.as_unsigned() + 2)));
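// (the range `-2..isize::MAX` contains `isize::MAX + 2` values, a count that
// only fits once converted to `usize`, hence `as_unsigned()` before the add.)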
}

#[test]
@@ -108,7 +108,7 @@ fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> Bytes {
assert!(!res.is_null());
Bytes {
ptr: Unique::new(res as *mut u8),
len: outsz as usize,
len: outsz.widen(),
}
}
}
@@ -133,7 +133,7 @@ fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> Result<Bytes,Error> {
if !res.is_null() {
Ok(Bytes {
ptr: Unique::new(res as *mut u8),
len: outsz as usize,
len: outsz.widen(),
})
} else {
Err(Error::new())
@@ -422,7 +422,7 @@ impl<'a> Parser<'a> {
Some((_, c)) => {
match c.to_digit(10) {
Some(i) => {
cur = cur * 10 + i as usize;
cur = cur * 10 + i.widen_(0usize);
found = true;
self.cur.next();
}
@@ -384,7 +384,7 @@ impl<'a> Id<'a> {
is_letter_or_underscore(c) || in_range('0', c, '9')
}
fn in_range(low: char, c: char, high: char) -> bool {
low as usize <= c as usize && c as usize <= high as usize
low <= c && c <= high
}
}

@@ -246,7 +246,7 @@ pub struct LogLevel(pub u32);
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let LogLevel(level) = *self;
match LOG_LEVEL_NAMES.get(level as usize - 1) {
match LOG_LEVEL_NAMES.get(level.widen_(0usize) - 1) {
Some(ref name) => fmt::Display::fmt(name, fmt),
None => fmt::Display::fmt(&level, fmt)
}
@@ -19,6 +19,7 @@

use core::prelude::*;
use core::num::Float;
use core::num::Truncate;
use core::marker::PhantomData;

use {Rng, Rand};
@@ -220,7 +221,7 @@ fn ziggurat<R: Rng, P, Z>(
// this may be slower than it would be otherwise.)
// FIXME: investigate/optimise for the above.
let bits: u64 = rng.gen();
let i = (bits & 0xff) as usize;
let i = (bits & 0xff).truncate_(0usize);
let f = (bits >> 11) as f64 / SCALE;
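// (bit budget, as a sketch: the low 8 bits index one of the 256 ziggurat
// layers, while `bits >> 11` keeps 53 bits, matching the precision of an
// `f64` mantissa, assuming `SCALE` is `2^53` as the shift suggests.)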

// u is either U(-1, 1) or U(0, 1) depending on if this is a
@@ -343,7 +344,7 @@ mod tests {
}
#[test] #[should_panic]
fn test_weighted_choice_weight_overflows() {
let x = (!0) as usize / 2; // x + x + 2 is the overflow
let x = (!0) / 2; // x + x + 2 is the overflow
WeightedChoice::new(&mut [Weighted { weight: x, item: 0 },
Weighted { weight: 1, item: 1 },
Weighted { weight: x, item: 2 },
@@ -16,6 +16,7 @@ use core::prelude::*;
use core::slice;
use core::iter::repeat;
use core::num::Wrapping as w;
use core::num::{Truncate, Widen};

use {Rng, SeedableRng, Rand};

@@ -136,7 +137,7 @@ impl IsaacRng {
const MIDPOINT: usize = RAND_SIZE_USIZE / 2;

macro_rules! ind {
($x:expr) => (self.mem[($x >> 2).0 as usize & (RAND_SIZE_USIZE - 1)] )
($x:expr) => (self.mem[($x >> 2).0.widen_(0usize) & (RAND_SIZE_USIZE - 1)] )
}

let r = [(0, MIDPOINT), (MIDPOINT, 0)];
@@ -213,7 +214,7 @@ impl Rng for IsaacRng {
// (the % is cheaply telling the optimiser that we're always
// in bounds, without unsafe. NB. this is a power of two, so
// it optimises to a bitwise mask).
self.rsl[(self.cnt % RAND_SIZE) as usize].0
self.rsl[(self.cnt % RAND_SIZE).widen_(0usize)].0
}
}

@@ -375,7 +376,7 @@ impl Isaac64Rng {
const MP_VEC: [(usize, usize); 2] = [(0,MIDPOINT), (MIDPOINT, 0)];
macro_rules! ind {
($x:expr) => {
*self.mem.get_unchecked((($x >> 3).0 as usize) & (RAND_SIZE_64 - 1))
*self.mem.get_unchecked((($x >> 3).0.truncate_(0usize)) & (RAND_SIZE_64 - 1))
}
}

@@ -456,7 +457,7 @@ impl Rng for Isaac64Rng {
// See corresponding location in IsaacRng.next_u32 for
// explanation.
debug_assert!(self.cnt < RAND_SIZE_64);
self.rsl[(self.cnt % RAND_SIZE_64) as usize].0
self.rsl[(self.cnt % RAND_SIZE_64)].0
}
}

@@ -14,6 +14,7 @@ use core::prelude::*;
use core::char;
use core::isize;
use core::usize;
use core::num::{Truncate, Widen};

use {Rand,Rng};

@@ -60,9 +61,9 @@ impl Rand for usize {
#[inline]
fn rand<R: Rng>(rng: &mut R) -> usize {
if usize::BITS == 32 {
rng.gen::<u32>() as usize
rng.gen::<u32>().widen()
} else {
rng.gen::<u64>() as usize
rng.gen::<u64>().truncate()
}
}
}
@@ -272,11 +272,11 @@ pub mod reader {
}

pub fn tag_at(data: &[u8], start: usize) -> DecodeResult<Res> {
let v = data[start] as usize;
let v = data[start].widen();
if v < 0xf0 {
Ok(Res { val: v, next: start + 1 })
} else if v > 0xf0 {
Ok(Res { val: ((v & 0xf) << 8) | data[start + 1] as usize, next: start + 2 })
Ok(Res { val: ((v & 0xf) << 8) | data[start + 1].widen_(0usize), next: start + 2 })
} else {
// every tag starting with byte 0xf0 is an overlong form, which is prohibited.
Err(InvalidTag(v))
@@ -287,27 +287,27 @@ pub mod reader {
fn vuint_at_slow(data: &[u8], start: usize) -> DecodeResult<Res> {
let a = data[start];
if a & 0x80 != 0 {
return Ok(Res {val: (a & 0x7f) as usize, next: start + 1});
return Ok(Res {val: (a & 0x7f).widen_(0usize), next: start + 1});
}
if a & 0x40 != 0 {
return Ok(Res {val: ((a & 0x3f) as usize) << 8 |
(data[start + 1] as usize),
return Ok(Res {val: (a & 0x3f).widen_(0usize) << 8 |
data[start + 1].widen_(0usize),
next: start + 2});
}
if a & 0x20 != 0 {
return Ok(Res {val: ((a & 0x1f) as usize) << 16 |
(data[start + 1] as usize) << 8 |
(data[start + 2] as usize),
return Ok(Res {val: (a & 0x1f).widen_(0usize) << 16 |
data[start + 1].widen_(0usize) << 8 |
data[start + 2].widen_(0usize),
next: start + 3});
}
if a & 0x10 != 0 {
return Ok(Res {val: ((a & 0x0f) as usize) << 24 |
(data[start + 1] as usize) << 16 |
(data[start + 2] as usize) << 8 |
(data[start + 3] as usize),
return Ok(Res {val: (a & 0x0f).widen_(0usize) << 24 |
data[start + 1].widen_(0usize) << 16 |
data[start + 2].widen_(0usize) << 8 |
data[start + 3].widen_(0usize),
next: start + 4});
}
Err(IntTooBig(a as usize))
Err(IntTooBig(a.widen()))
}
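// (worked example, as a sketch: `[0x83]` has bit 7 set, so it decodes in one
// byte to `0x83 & 0x7f = 3`; `[0x41, 0x02]` has bit 6 set instead, so it
// decodes in two bytes to `(0x01 << 8) | 0x02 = 258`.)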

pub fn vuint_at(data: &[u8], start: usize) -> DecodeResult<Res> {
@@ -346,18 +346,18 @@ pub mod reader {
let ptr = data.as_ptr().offset(start as isize) as *const u32;
let val = u32::from_be(*ptr);

let i = (val >> 28) as usize;
let i = (val >> 28).widen_(0usize);
let (shift, mask) = SHIFT_MASK_TABLE[i];
Ok(Res {
val: ((val >> shift) & mask) as usize,
val: ((val >> shift) & mask).widen(),
next: start + ((32 - shift) >> 3),
})
}
}
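// (fast path above, as a sketch: four bytes are read big-endian at once; the
// top nibble selects a precomputed `(shift, mask)` pair from
// `SHIFT_MASK_TABLE`, so the value and the encoded length fall out without a
// branch per length bit.)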

pub fn tag_len_at(data: &[u8], tag: Res) -> DecodeResult<Res> {
if tag.val < NUM_IMPLICIT_TAGS && TAG_IMPLICIT_LEN[tag.val] >= 0 {
Ok(Res { val: TAG_IMPLICIT_LEN[tag.val] as usize, next: tag.next })
Ok(Res { val: TAG_IMPLICIT_LEN[tag.val].as_unsigned().widen(), next: tag.next })
} else {
vuint_at(data, tag.next)
}
@@ -527,7 +527,7 @@ pub mod reader {
r_tag,
r_doc.start,
r_doc.end);
if r_tag != (exp_tag as usize) {
if r_tag != exp_tag as usize {
return Err(Expected(format!("expected EBML doc with tag {:?} but \
found tag {:?}", exp_tag, r_tag)));
}
@@ -563,9 +563,9 @@ pub mod reader {
let TaggedDoc { tag: r_tag, doc: r_doc } =
try!(doc_at(self.parent.data, self.pos));
let r = if r_tag == (EsSub8 as usize) {
doc_as_u8(r_doc) as usize
doc_as_u8(r_doc).widen()
} else if r_tag == (EsSub32 as usize) {
doc_as_u32(r_doc) as usize
doc_as_u32(r_doc).widen()
} else {
return Err(Expected(format!("expected EBML doc with tag {:?} or {:?} but \
found tag {:?}", EsSub8, EsSub32, r_tag)));
@@ -644,9 +644,9 @@ pub mod reader {
fn read_uint(&mut self) -> DecodeResult<usize> {
let v = try!(self._next_int(EsU8, EsU64));
if v > (::std::usize::MAX as u64) {
Err(IntTooBig(v as usize))
Err(IntTooBig(v.truncate()))
} else {
Ok(v as usize)
Ok(v.truncate())
}
}

@@ -658,7 +658,7 @@ pub mod reader {
let v = try!(self._next_int(EsI8, EsI64)) as i64;
if v > (isize::MAX as i64) || v < (isize::MIN as i64) {
debug!("FIXME \\#6122: Removing this makes this function miscompile");
Err(IntTooBig(v as usize))
Err(IntTooBig(v.as_unsigned().truncate()))
} else {
Ok(v as isize)
}
@@ -952,7 +952,7 @@ pub mod writer {
let last_size_pos = self.size_positions.pop().unwrap();
let cur_pos = try!(self.writer.seek(SeekFrom::Current(0)));
try!(self.writer.seek(SeekFrom::Start(last_size_pos)));
let size = (cur_pos - last_size_pos - 4) as usize;
let size = (cur_pos - last_size_pos - 4).truncate();

// relax the size encoding for small tags (bigger tags are costly to move).
// we should never try to move the stable positions, however.
@@ -961,8 +961,8 @@ pub mod writer {
// we can't alter the buffer in place, so have a temporary buffer
let mut buf = [0u8; RELAX_MAX_SIZE];
{
let last_size_pos = last_size_pos as usize;
let data = &self.writer.get_ref()[last_size_pos+4..cur_pos as usize];
let last_size_pos = last_size_pos.truncate_(0usize);
let data = &self.writer.get_ref()[last_size_pos+4..cur_pos.truncate()];
bytes::copy_memory(data, &mut buf);
}

@@ -1098,9 +1098,9 @@ pub mod writer {
impl<'a> Encoder<'a> {
// used internally to emit things like the vector length and so on
fn _emit_tagged_sub(&mut self, v: usize) -> EncodeResult {
if v as u8 as usize == v {
if (v as u8).widen_(0usize) == v {
self.wr_tagged_raw_u8(EsSub8 as usize, v as u8)
} else if v as u32 as usize == v {
} else if (v as u32).widen_(0usize) == v {
self.wr_tagged_raw_u32(EsSub32 as usize, v as u32)
} else {
Err(io::Error::new(io::ErrorKind::Other,
@@ -255,7 +255,7 @@ in a non-constant integer, which leads to this error. Example:
```
const X: u32 = 1;
const Y: usize = &X as *const u32 as usize;
println!("{}", Y);
```
"##,
@@ -264,12 +264,12 @@ impl MetadataBlob {
if slice.len() < 4 {
&[] // corrupt metadata
} else {
let len = (((slice[0] as u32) << 24) |
let len = ((slice[0] as u32) << 24) |
((slice[1] as u32) << 16) |
((slice[2] as u32) << 8) |
((slice[3] as u32) << 0)) as usize;
if len + 4 <= slice.len() {
&slice[4.. len + 4]
((slice[3] as u32) << 0);
if len.widen_(0usize) + 4 <= slice.len() {
&slice[4.. len.widen_(0usize) + 4]
} else {
&[] // corrupt or old metadata
}
@@ -72,15 +72,15 @@ fn lookup_hash<'a, F>(d: rbml::Doc<'a>, mut eq_fn: F, hash: u64) -> Option<rbml:
{
let index = reader::get_doc(d, tag_index);
let table = reader::get_doc(index, tag_index_table);
let hash_pos = table.start + (hash % 256 * 4) as usize;
let pos = u32_from_be_bytes(&d.data[hash_pos..]) as usize;
let tagged_doc = reader::doc_at(d.data, pos).unwrap();
let hash_pos = table.start + (hash % 256 * 4).truncate_(0usize);
let pos = u32_from_be_bytes(&d.data[hash_pos..]);
let tagged_doc = reader::doc_at(d.data, pos.widen()).unwrap();

reader::tagged_docs(tagged_doc.doc, tag_index_buckets_bucket_elt).find(|elt| {
eq_fn(&elt.data[elt.start + 4 .. elt.end])
}).map(|elt| {
let pos = u32_from_be_bytes(&elt.data[elt.start..]) as usize;
reader::doc_at(d.data, pos).unwrap().doc
let pos = u32_from_be_bytes(&elt.data[elt.start..]);
reader::doc_at(d.data, pos.widen()).unwrap().doc
})
}
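// (index layout assumed by the lookup above: 256 hash buckets, each a 4-byte
// big-endian document offset stored at `table.start + (hash % 256) * 4`;
// every bucket element begins with another 4-byte offset followed by the key
// bytes that `eq_fn` compares.)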

@@ -544,12 +544,12 @@ pub fn each_lang_item<F>(cdata: Cmd, mut f: F) -> bool where
let lang_items = reader::get_doc(root, tag_lang_items);
reader::tagged_docs(lang_items, tag_lang_items_item).all(|item_doc| {
let id_doc = reader::get_doc(item_doc, tag_lang_items_item_id);
let id = reader::doc_as_u32(id_doc) as usize;
let id = reader::doc_as_u32(id_doc);
let node_id_doc = reader::get_doc(item_doc,
tag_lang_items_item_node_id);
let node_id = reader::doc_as_u32(node_id_doc) as ast::NodeId;

f(node_id, id)
f(node_id, id.widen())
})
}

@@ -1498,7 +1498,7 @@ fn doc_generics<'tcx>(base_doc: rbml::Doc,
let def_id = translated_def_id(cdata, def_id_doc);

let doc = reader::get_doc(rp_doc, tag_region_param_def_space);
let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as usize);
let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc).truncate());

let doc = reader::get_doc(rp_doc, tag_region_param_def_index);
let index = reader::doc_as_u64(doc) as u32;
@@ -1529,7 +1529,7 @@ fn doc_predicates<'tcx>(base_doc: rbml::Doc,
let mut predicates = subst::VecPerParamSpace::empty();
for predicate_doc in reader::tagged_docs(doc, tag_predicate) {
let space_doc = reader::get_doc(predicate_doc, tag_predicate_space);
let space = subst::ParamSpace::from_uint(reader::doc_as_u8(space_doc) as usize);
let space = subst::ParamSpace::from_uint(reader::doc_as_u8(space_doc).widen());

let data_doc = reader::get_doc(predicate_doc, tag_predicate_data);
let data = parse_predicate_data(data_doc.data, data_doc.start, cdata.cnum, tcx,
@@ -1638,7 +1638,7 @@ fn encode_index<T, F>(rbml_w: &mut Encoder, index: Vec<entry<T>>, mut write_fn:
for elt in index {
let mut s = SipHasher::new();
elt.val.hash(&mut s);
let h = s.finish() as usize;
let h = s.finish().truncate_(0usize);
(&mut buckets[h % 256]).push(elt);
}

@@ -2045,10 +2045,10 @@ pub fn encode_metadata(parms: EncodeParams, krate: &ast::Crate) -> Vec<u8> {

// RBML compacts the encoded bytes whenever appropriate,
// so there is some garbage left after the end of the data.
let metalen = wr.seek(SeekFrom::Current(0)).unwrap() as usize;
let metalen = wr.seek(SeekFrom::Current(0)).unwrap();
let mut v = wr.into_inner();
v.truncate(metalen);
assert_eq!(v.len(), metalen);
v.truncate(metalen.truncate());
assert_eq!(v.len(), metalen.truncate_(0usize));

// And here we run into yet another obscure archive bug, in which metadata
// loaded from archives may have trailing garbage bytes. A while back one of
@@ -766,17 +766,17 @@ fn get_metadata_section_imp(target: &Target, filename: &Path)
let mut name_buf = ptr::null();
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = slice::from_raw_parts(name_buf as *const u8,
name_len as usize).to_vec();
name_len.as_unsigned().widen()).to_vec();
let name = String::from_utf8(name).unwrap();
debug!("get_metadata_section: name {}", name);
if read_meta_section_name(target) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as usize;
let csz = llvm::LLVMGetSectionSize(si.llsi);
let cvbuf: *const u8 = cbuf as *const u8;
let vlen = encoder::metadata_encoding_version.len();
debug!("checking {} bytes of metadata-version stamp",
vlen);
let minsz = cmp::min(vlen, csz);
let minsz = cmp::min(vlen, csz.truncate());
let buf0 = slice::from_raw_parts(cvbuf, minsz);
let version_ok = buf0 == encoder::metadata_encoding_version;
if !version_ok {
@@ -786,8 +786,8 @@ fn get_metadata_section_imp(target: &Target, filename: &Path)

let cvbuf1 = cvbuf.offset(vlen as isize);
debug!("inflating {} bytes of compressed metadata",
csz - vlen);
let bytes = slice::from_raw_parts(cvbuf1, csz - vlen);
csz.truncate_(0usize) - vlen);
let bytes = slice::from_raw_parts(cvbuf1, csz.truncate_(0usize) - vlen);
match flate::inflate_bytes(bytes) {
Ok(inflated) => return Ok(MetadataVec(inflated)),
Err(_) => {}
@@ -619,7 +619,7 @@ fn parse_uint(st: &mut PState) -> usize {
fn parse_u32(st: &mut PState) -> u32 {
let n = parse_uint(st);
let m = n as u32;
assert_eq!(m as usize, n);
assert_eq!(m.widen_(0usize), n);
m
}

@@ -422,7 +422,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
// whether the bit_index is greater than the
// actual value the user specified and stop
// iterating if so.
let bit_index = base_index + offset as usize;
let bit_index = base_index + offset;
if bit_index >= self.bits_per_id {
return true;
} else if !f(bit_index) {
@@ -172,7 +172,7 @@ fn calculate_type(sess: &session::Session,
assert!(src.rlib.is_some());
debug!("adding staticlib: {}", data.name);
add_library(sess, cnum, cstore::RequireStatic, &mut formats);
ret[cnum as usize - 1] = Some(cstore::RequireStatic);
ret[cnum.widen_(0usize) - 1] = Some(cstore::RequireStatic);
}
});

@@ -284,7 +284,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
AddVar(vid) => {
let mut var_origins = self.var_origins.borrow_mut();
var_origins.pop().unwrap();
assert_eq!(var_origins.len(), vid.index as usize);
assert_eq!(var_origins.len(), vid.index.widen_(0usize));
}
AddConstraint(ref constraint) => {
self.constraints.borrow_mut().remove(constraint);
@@ -312,7 +312,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
pub fn num_vars(&self) -> u32 {
let len = self.var_origins.borrow().len();
// enforce no overflow
assert!(len as u32 as usize == len);
assert!((len as u32).widen_(0usize) == len);
len as u32
}

@@ -557,7 +557,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
match *self.values.borrow() {
None => {
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[rid.index as usize].span(),
(*self.var_origins.borrow())[rid.index.widen_(0usize)].span(),
"attempt to resolve region variable before values have \
been computed!")
}
@@ -746,7 +746,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {

(ReInfer(ReVar(v_id)), _) | (_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[v_id.index as usize].span(),
(*self.var_origins.borrow())[v_id.index.widen_(0usize)].span(),
&format!("lub_concrete_regions invoked with \
non-concrete regions: {:?}, {:?}",
a,
@@ -854,7 +854,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
(ReInfer(ReVar(v_id)), _) |
(_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
(*self.var_origins.borrow())[v_id.index as usize].span(),
(*self.var_origins.borrow())[v_id.index.widen_(0usize)].span(),
&format!("glb_concrete_regions invoked with \
non-concrete regions: {:?}, {:?}",
a,
@@ -998,7 +998,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}

fn construct_var_data(&self) -> Vec<VarData> {
(0..self.num_vars() as usize).map(|_| {
(0..self.num_vars()).map(|_| {
VarData {
// All nodes are initially classified as contracting; during
// the expansion phase, we will shift the classification for
@@ -1027,14 +1027,14 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
.repr(self.tcx));
match *constraint {
ConstrainRegSubVar(a_region, b_vid) => {
let b_data = &mut var_data[b_vid.index as usize];
let b_data = &mut var_data[b_vid.index.widen_(0usize)];
self.expand_node(free_regions, a_region, b_vid, b_data)
}
ConstrainVarSubVar(a_vid, b_vid) => {
match var_data[a_vid.index as usize].value {
match var_data[a_vid.index.widen_(0usize)].value {
NoValue | ErrorValue => false,
Value(a_region) => {
let b_node = &mut var_data[b_vid.index as usize];
let b_node = &mut var_data[b_vid.index.widen_(0usize)];
self.expand_node(free_regions, a_region, b_vid, b_node)
}
}
@@ -1117,16 +1117,16 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
false
}
ConstrainVarSubVar(a_vid, b_vid) => {
match var_data[b_vid.index as usize].value {
match var_data[b_vid.index.widen_(0usize)].value {
NoValue | ErrorValue => false,
Value(b_region) => {
let a_data = &mut var_data[a_vid.index as usize];
let a_data = &mut var_data[a_vid.index.widen_(0usize)];
self.contract_node(free_regions, a_vid, a_data, b_region)
}
}
}
ConstrainVarSubReg(a_vid, b_region) => {
let a_data = &mut var_data[a_vid.index as usize];
let a_data = &mut var_data[a_vid.index.widen_(0usize)];
self.contract_node(free_regions, a_vid, a_data, b_region)
}
}
@@ -1274,12 +1274,12 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
// idea is to report errors that derive from independent
// regions of the graph, but not those that derive from
// overlapping locations.
let mut dup_vec: Vec<_> = repeat(u32::MAX).take(self.num_vars() as usize).collect();
let mut dup_vec: Vec<_> = repeat(u32::MAX).take(self.num_vars().widen()).collect();

let mut opt_graph = None;

for idx in 0..self.num_vars() as usize {
match var_data[idx].value {
for idx in 0..self.num_vars() {
match var_data[idx.widen_(0usize)].value {
Value(_) => {
/* Inference successful */
}
@@ -1319,7 +1319,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
let graph = opt_graph.as_ref().unwrap();

let node_vid = RegionVid { index: idx as u32 };
match var_data[idx].classification {
match var_data[idx.widen_(0usize)].classification {
Expanding => {
self.collect_error_for_expanding_node(
free_regions, graph, var_data, &mut dup_vec,
@@ -1335,7 +1335,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}
}

(0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect()
(0..self.num_vars()).map(|idx| var_data[idx.widen_(0usize)].value).collect()
}

fn construct_graph(&self) -> RegionGraph {
@@ -1353,17 +1353,17 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
for (constraint, _) in &*constraints {
match *constraint {
ConstrainVarSubVar(a_id, b_id) => {
graph.add_edge(NodeIndex(a_id.index as usize),
NodeIndex(b_id.index as usize),
graph.add_edge(NodeIndex(a_id.index.widen()),
NodeIndex(b_id.index.widen()),
*constraint);
}
ConstrainRegSubVar(_, b_id) => {
graph.add_edge(dummy_idx,
NodeIndex(b_id.index as usize),
NodeIndex(b_id.index.widen()),
*constraint);
}
ConstrainVarSubReg(a_id, _) => {
graph.add_edge(NodeIndex(a_id.index as usize),
graph.add_edge(NodeIndex(a_id.index.widen()),
dummy_idx,
*constraint);
}
@@ -1418,7 +1418,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
debug!("pushing SubSupConflict sub: {:?} sup: {:?}",
lower_bound.region, upper_bound.region);
errors.push(SubSupConflict(
(*self.var_origins.borrow())[node_idx.index as usize].clone(),
(*self.var_origins.borrow())[node_idx.index.widen_(0usize)].clone(),
lower_bound.origin.clone(),
lower_bound.region,
upper_bound.origin.clone(),
@@ -1429,7 +1429,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}

self.tcx.sess.span_bug(
(*self.var_origins.borrow())[node_idx.index as usize].span(),
(*self.var_origins.borrow())[node_idx.index.widen_(0usize)].span(),
&format!("collect_error_for_expanding_node() could not find error \
for var {:?}, lower_bounds={}, upper_bounds={}",
node_idx,
@@ -1464,7 +1464,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
Ok(_) => {}
Err(_) => {
errors.push(SupSupConflict(
(*self.var_origins.borrow())[node_idx.index as usize].clone(),
(*self.var_origins.borrow())[node_idx.index.widen_(0usize)].clone(),
upper_bound_1.origin.clone(),
upper_bound_1.region,
upper_bound_2.origin.clone(),
@@ -1476,7 +1476,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
}

self.tcx.sess.span_bug(
(*self.var_origins.borrow())[node_idx.index as usize].span(),
(*self.var_origins.borrow())[node_idx.index.widen_(0usize)].span(),
&format!("collect_error_for_contracting_node() could not find error \
for var {:?}, upper_bounds={}",
node_idx,
@@ -1510,12 +1510,12 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {

while !state.stack.is_empty() {
let node_idx = state.stack.pop().unwrap();
let classification = var_data[node_idx.index as usize].classification;
let classification = var_data[node_idx.index.widen_(0usize)].classification;

// check whether we've visited this node on some previous walk
if dup_vec[node_idx.index as usize] == u32::MAX {
dup_vec[node_idx.index as usize] = orig_node_idx.index;
} else if dup_vec[node_idx.index as usize] != orig_node_idx.index {
if dup_vec[node_idx.index.widen_(0usize)] == u32::MAX {
dup_vec[node_idx.index.widen_(0usize)] = orig_node_idx.index;
} else if dup_vec[node_idx.index.widen_(0usize)] != orig_node_idx.index {
state.dup_found = true;
}

@@ -1543,7 +1543,7 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
dir: Direction) {
debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);

let source_node_index = NodeIndex(source_vid.index as usize);
let source_node_index = NodeIndex(source_vid.index.widen());
for (_, edge) in graph.adjacent_edges(source_node_index, dir) {
match edge.data {
ConstrainVarSubVar(from_vid, to_vid) => {
@@ -1627,7 +1627,7 @@ fn normalize(values: &Vec<VarValue>, r: ty::Region) -> ty::Region {
}

fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
match values[rid.index as usize] {
match values[rid.index.widen_(0usize)] {
Value(r) => r,
NoValue => ReEmpty, // No constraints, return ty::ReEmpty
ErrorValue => ReStatic, // Previously reported error.
@@ -69,11 +69,11 @@ impl<'tcx> TypeVariableTable<'tcx> {
}

fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
relations(self.values.get_mut(a.index as usize))
relations(self.values.get_mut(a.index.widen()))
}

pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
self.values.get(vid.index as usize).diverging
self.values.get(vid.index.widen()).diverging
}

/// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
@@ -97,7 +97,7 @@ impl<'tcx> TypeVariableTable<'tcx> {
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
let old_value = {
let value_ptr = &mut self.values.get_mut(vid.index as usize).value;
let value_ptr = &mut self.values.get_mut(vid.index.widen()).value;
mem::replace(value_ptr, Known(ty))
};

@@ -123,7 +123,7 @@ impl<'tcx> TypeVariableTable<'tcx> {
}

pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
match self.values.get(vid.index as usize).value {
match self.values.get(vid.index.widen()).value {
Bounded(..) => None,
Known(t) => Some(t)
}
@@ -204,12 +204,12 @@ impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> {
fn reverse(values: &mut Vec<TypeVariableData<'tcx>>, action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
values[vid.index as usize].value = Bounded(relations);
values[vid.index.widen_(0usize)].value = Bounded(relations);
}

Relate(a, b) => {
relations(&mut (*values)[a.index as usize]).pop();
relations(&mut (*values)[b.index as usize]).pop();
relations(&mut (*values)[a.index.widen_(0usize)]).pop();
relations(&mut (*values)[b.index.widen_(0usize)]).pop();
}
}
}
@@ -98,7 +98,7 @@ impl<'tcx> Substs<'tcx> {
}

pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
*self.types.get(ty_param_def.space, ty_param_def.index as usize)
*self.types.get(ty_param_def.space, ty_param_def.index.widen())
}

pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
@@ -625,7 +625,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
match regions.opt_get(data.space, data.index as usize) {
match regions.opt_get(data.space, data.index.widen()) {
Some(&r) => {
self.shift_region_through_binders(r)
}
@@ -682,7 +682,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
impl<'a,'tcx> SubstFolder<'a,'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
// Look up the type in the substitutions. It really should be in there.
let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize);
let opt_ty = self.substs.types.opt_get(p.space, p.idx.widen());
let ty = match opt_ty {
Some(t) => *t,
None => {
@@ -2526,7 +2526,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
// type parameter (for now, to avoid tracking edge cases).
let i = if let Some(&ty::ty_param(p)) = fields.last().map(|ty| &ty.sty) {
assert!(p.space == TypeSpace);
p.idx as usize
p.idx.widen_(0usize)
} else {
return Err(Unimplemented);
};
@@ -2544,8 +2544,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}

// Extract T and U from Struct<T> and Struct<U>.
let inner_source = *substs_a.types.get(TypeSpace, i);
let inner_target = *substs_b.types.get(TypeSpace, i);
let inner_source = *substs_a.types.get(TypeSpace, i.widen());
let inner_target = *substs_b.types.get(TypeSpace, i.widen());

// Check that all the source structure with the unsized
// type parameter is a subtype of the target.
@@ -5335,7 +5335,7 @@ pub fn associated_type_parameter_index(cx: &ctxt,
-> usize {
for type_parameter_def in trait_def.generics.types.iter() {
if type_parameter_def.def_id == associated_type_id {
return type_parameter_def.index as usize
return type_parameter_def.index.widen()
}
}
cx.sess.bug("couldn't find associated type parameter index")
@@ -6140,8 +6140,8 @@ pub fn eval_repeat_count(tcx: &ctxt, count_expr: &ast::Expr) -> usize {
match const_eval::eval_const_expr_partial(tcx, count_expr, Some(tcx.types.usize)) {
Ok(val) => {
let found = match val {
const_eval::const_uint(count) => return count as usize,
const_eval::const_int(count) if count >= 0 => return count as usize,
const_eval::const_uint(count) => return count.truncate(),
const_eval::const_int(count) if count >= 0 => return count.as_unsigned().truncate(),
const_eval::const_int(_) => "negative integer",
const_eval::const_float(_) => "float",
const_eval::const_str(_) => "string",
@@ -157,15 +157,15 @@ impl<K:UnifyKey> UnificationTable<K> {
/// NB. This is a building-block operation and you would probably
/// prefer to call `probe` below.
fn get(&mut self, vid: K) -> VarValue<K> {
let index = vid.index() as usize;
let mut value: VarValue<K> = self.values.get(index).clone();
let index = vid.index();
let mut value: VarValue<K> = self.values.get(index.widen()).clone();
match value.parent(vid) {
Some(redirect) => {
let root: VarValue<K> = self.get(redirect);
if root.key() != redirect {
// Path compression
value.parent = root.key();
self.values.set(index, value);
self.values.set(index.widen(), value);
}
root
}
@@ -176,8 +176,8 @@ impl<K:UnifyKey> UnificationTable<K> {
}

fn is_root(&self, key: K) -> bool {
let index = key.index() as usize;
self.values.get(index).parent(key).is_none()
let index = key.index();
self.values.get(index.widen()).parent(key).is_none()
}

/// Sets the value for `vid` to `new_value`. `vid` MUST be a root
@@ -188,8 +188,8 @@ impl<K:UnifyKey> UnificationTable<K> {
debug!("Updating variable {:?} to {:?}",
key, new_value);

let index = key.index() as usize;
self.values.set(index, new_value);
let index = key.index();
self.values.set(index.widen(), new_value);
}

/// Either redirects `node_a` to `node_b` or vice versa, depending
@@ -93,11 +93,11 @@ impl<'a> Iterator for Iter<'a> {
None
} else {
let name = slice::from_raw_parts(name_ptr as *const u8,
name_len as usize);
name_len.widen());
str::from_utf8(name).ok().map(|s| s.trim())
},
data: slice::from_raw_parts(data_ptr as *const u8,
data_len as usize),
data_len.widen()),
};
::LLVMRustArchiveIteratorNext(self.ptr);
Some(child)
@@ -2263,7 +2263,7 @@ type RustStringRepr = *mut RefCell<Vec<u8>>;
pub unsafe extern "C" fn rust_llvm_string_write_impl(sr: RustStringRef,
ptr: *const c_char,
size: size_t) {
let slice = slice::from_raw_parts(ptr as *const u8, size as usize);
let slice = slice::from_raw_parts(ptr as *const u8, size.widen());

let sr: RustStringRepr = mem::transmute(sr);
(*sr).borrow_mut().push_all(slice);
@@ -331,7 +331,7 @@ pub fn mangle_exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, path: PathEl
"abcdefghijklmnopqrstuvwxyz\
ABCDEFGHIJKLMNOPQRSTUVWXYZ\
0123456789";
let id = id as usize;
let id = id.widen_(0usize);
let extra1 = id % EXTRA_CHARS.len();
let id = id / EXTRA_CHARS.len();
let extra2 = id % EXTRA_CHARS.len();
@@ -709,7 +709,7 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write,
RLIB_BYTECODE_OBJECT_MAGIC.len() + // magic id
mem::size_of_val(&RLIB_BYTECODE_OBJECT_VERSION) + // version
mem::size_of_val(&bc_data_deflated_size) + // data size field
bc_data_deflated_size as usize; // actual data
bc_data_deflated_size.truncate_(0usize); // actual data

// If the number of bytes written to the object so far is odd, add a
// padding byte to make it even. This works around a crash bug in LLDB
@@ -1100,7 +1100,7 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session,
// We may not pass all crates through to the linker. Some crates may
// appear statically in an existing dylib, meaning we'll pick up all the
// symbols from the dylib.
let kind = match data[cnum as usize - 1] {
let kind = match data[cnum.widen_(0usize) - 1] {
Some(t) => t,
None => continue
};
@@ -91,7 +91,8 @@ pub fn run(sess: &session::Session, llmod: ModuleRef,
let data_size = extract_compressed_bytecode_size_v1(bc_encoded);
let compressed_data = &bc_encoded[
link::RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET..
(link::RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET + data_size as usize)];
(link::RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET
+ data_size.truncate_(0usize))];

match flate::inflate_bytes(compressed_data) {
Ok(inflated) => inflated,
@@ -172,8 +172,8 @@ pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64!(DTOR_NEEDED);
#[allow(dead_code)]
pub fn dtor_needed_usize(ccx: &CrateContext) -> usize {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"32" => DTOR_NEEDED_U32 as usize,
"64" => DTOR_NEEDED_U64 as usize,
"32" => DTOR_NEEDED_U32.widen(),
"64" => DTOR_NEEDED_U64.truncate(),
tws => panic!("Unsupported target word size for int: {}", tws),
}
}
@@ -184,8 +184,8 @@ pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64!(DTOR_DONE);
#[allow(dead_code)]
pub fn dtor_done_usize(ccx: &CrateContext) -> usize {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"32" => DTOR_DONE_U32 as usize,
"64" => DTOR_DONE_U64 as usize,
"32" => DTOR_DONE_U32.widen(),
"64" => DTOR_DONE_U64.truncate(),
tws => panic!("Unsupported target word size for int: {}", tws),
}
}
@@ -847,9 +847,9 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
assert_eq!(val_ty(ptr), llty.ptr_to());
let bits = machine::llbitsize_of_real(bcx.ccx(), llty);
assert!(bits <= 64);
let bits = bits as usize;
let bits = bits;
let mask = (!0u64 >> (64 - bits)) as Disr;
// For a (max) discr of -1, max will be `-1 as usize`, which overflows.
// For a (max) discr of -1, max will be `!0` (i.e. `-1` reinterpreted as `Disr`), so `max + 1` overflows.
// However, that is fine here (it would still represent the full range),
if (max.wrapping_add(1)) & mask == min & mask {
// i.e., if the range is everything. The lo==hi case would be
@@ -903,16 +903,16 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
General(ity, ref cases, dtor) => {
if dtor_active(dtor) {
let ptr = trans_field_ptr(bcx, r, val, discr,
cases[discr as usize].fields.len() - 2);
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), ptr);
cases[discr.truncate_(0usize)].fields.len() - 2);
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED.widen()), ptr);
}
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
GEPi(bcx, val, &[0, 0]));
}
Univariant(ref st, dtor) => {
assert_eq!(discr, 0);
if dtor_active(dtor) {
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize),
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED.widen()),
GEPi(bcx, val, &[0, st.fields.len() - 1]));
}
}
@@ -949,7 +949,8 @@ pub fn num_args(r: &Repr, discr: Disr) -> usize {
st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
}
General(_, ref cases, dtor) => {
cases[discr as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
cases[discr.truncate_(0usize)].fields.len() - 1
- (if dtor_active(dtor) { 1 } else { 0 })
}
RawNullablePointer { nndiscr, ref nullfields, .. } => {
if discr == nndiscr { 1 } else { nullfields.len() }
@@ -976,7 +977,7 @@ pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
struct_field_ptr(bcx, st, val, ix, false)
}
General(_, ref cases, _) => {
struct_field_ptr(bcx, &cases[discr as usize], val, ix + 1, true)
struct_field_ptr(bcx, &cases[discr.truncate_(0usize)], val, ix + 1, true)
}
RawNullablePointer { nndiscr, ref nullfields, .. } |
StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => {
@@ -1119,7 +1120,7 @@ pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr
C_integral(ll_inttype(ccx, ity), discr as u64, true)
}
General(ity, ref cases, _) => {
let case = &cases[discr as usize];
let case = &cases[discr.truncate_(0usize)];
let (max_sz, _) = union_size_and_align(&cases[..]);
let lldiscr = C_integral(ll_inttype(ccx, ity), discr as u64, true);
let mut f = vec![lldiscr];
@@ -981,7 +981,7 @@ fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte:

let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
let llzeroval = C_u8(ccx, byte as usize);
let llzeroval = C_u8(ccx, byte.widen());
let size = machine::llsize_of(ccx, llty);
let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
let volatile = C_bool(ccx, false);
@@ -28,7 +28,7 @@ fn align(off: usize, ty: Type) -> usize {

fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => ((ty.int_width().truncate_(0usize)) + 7) / 8,
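// (`(bits + 7) / 8` rounds a bit-width up to whole bytes, so e.g. a
// 1-bit integer still counts as one byte for alignment purposes.)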
Pointer => 8,
Float => 4,
Double => 8,
@@ -55,7 +55,7 @@ fn ty_align(ty: Type) -> usize {

fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => ((ty.int_width().truncate_(0usize)) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
@@ -132,7 +132,7 @@ fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
let (base_ty, members) = (prev_base_ty.unwrap(), members);

// Ensure there is no padding.
if ty_size(ty) == ty_size(base_ty) * (members as usize) {
if ty_size(ty) == ty_size(base_ty) * (members.truncate_(0usize)) {
Some((base_ty, members))
} else {
None
@@ -35,7 +35,7 @@ fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize {

fn general_ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => ((ty.int_width().truncate_(0usize)) + 7) / 8,
Pointer => 4,
Float => 4,
Double => 8,
@@ -69,7 +69,7 @@ fn general_ty_align(ty: Type) -> usize {
// /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html
fn ios_ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8),
Integer => cmp::min(4, ((ty.int_width().truncate_(0usize)) + 7) / 8),
Pointer => 4,
Float => 4,
Double => 4,
@@ -96,7 +96,7 @@ fn ios_ty_align(ty: Type) -> usize {

fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => ((ty.int_width().truncate_(0usize)) + 7) / 8,
Pointer => 4,
Float => 4,
Double => 8,
@@ -29,7 +29,7 @@ fn align(off: usize, ty: Type) -> usize {

fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => (ty.int_width().truncate_(0usize) + 7) / 8,
Pointer => 4,
Float => 4,
Double => 8,
@@ -56,7 +56,7 @@ fn ty_align(ty: Type) -> usize {

fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => (ty.int_width().truncate_(0usize) + 7) / 8,
Pointer => 4,
Float => 4,
Double => 8,
@@ -30,7 +30,7 @@ fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8
((llvm::LLVMGetIntTypeWidth(ty.to_ref())).widen_(0usize) + 7) / 8
}
}
Pointer => 4,
@@ -56,7 +56,7 @@ fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8
((llvm::LLVMGetIntTypeWidth(ty.to_ref())).widen_(0usize) + 7) / 8
}
}
Pointer => 4,
@@ -92,7 +92,7 @@ fn classify_ty(ty: Type) -> Vec<RegClass> {

fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Integer => (ty.int_width().truncate_(0usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
@@ -119,7 +119,7 @@ fn classify_ty(ty: Type) -> Vec<RegClass> {

fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => (ty.int_width() as usize + 7) / 8,
Integer => (ty.int_width().truncate_(0usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
@@ -510,7 +510,7 @@ impl<'tcx> LocalCrateContext<'tcx> {
CrateContext {
shared: shared,
local: self,
index: !0 as usize,
index: !0,
}
}
}
@@ -1126,7 +1126,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
}

let field_size = if self.is_simd {
machine::llsize_of_alloc(cx, type_of::type_of(cx, self.fields[0].mt.ty)) as usize
machine::llsize_of_alloc(cx, type_of::type_of(cx, self.fields[0].mt.ty))
} else {
0xdeadbeef
};
@@ -1140,7 +1140,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {

let offset = if self.is_simd {
assert!(field_size != 0xdeadbeef);
FixedMemberOffset { bytes: i * field_size }
FixedMemberOffset { bytes: i * field_size.truncate_(0usize) }
} else {
ComputedMemberOffset
};
@@ -1352,7 +1352,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
// DWARF representation of enums uniform.

// First create a description of the artificial wrapper struct:
let non_null_variant = &(*self.variants)[non_null_variant_index as usize];
let non_null_variant = &(*self.variants)[non_null_variant_index.truncate_(0usize)];
let non_null_variant_name = token::get_name(non_null_variant.name);

// The llvm type and metadata of the pointer
@@ -1397,8 +1397,9 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {

// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - non_null_variant_index) as usize;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
let null_variant_index = 1 - non_null_variant_index;
let null_variant_name = token::get_name((*self.variants)
[null_variant_index.truncate_(0usize)].name);
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
0,
null_variant_name);
@@ -1423,7 +1424,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[nndiscr as usize],
&*(*self.variants)[nndiscr.truncate_(0usize)],
OptimizedDiscriminant,
self.containing_scope,
self.span);
@@ -1438,8 +1439,9 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {

// Encode the information about the null variant in the union
// member's name.
let null_variant_index = (1 - nndiscr) as usize;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
let null_variant_index = 1 - nndiscr;
let null_variant_name = token::get_name((*self.variants)
[null_variant_index.truncate_(0usize)].name);
let discrfield = discrfield.iter()
.skip(1)
.map(|x| x.to_string())
@@ -122,21 +122,21 @@ pub fn check_intrinsics(ccx: &CrateContext) {
&format!("transmute called on types with potentially different sizes: \
{} (could be {} bit{}) to {} (could be {} bit{})",
ty_to_string(ccx.tcx(), transmute_restriction.original_from),
from_type_size as usize,
from_type_size,
if from_type_size == 1 {""} else {"s"},
ty_to_string(ccx.tcx(), transmute_restriction.original_to),
to_type_size as usize,
to_type_size,
if to_type_size == 1 {""} else {"s"}));
} else {
ccx.sess().span_err(
transmute_restriction.span,
&format!("transmute called on types with different sizes: \
{} ({} bit{}) to {} ({} bit{})",
ty_to_string(ccx.tcx(), transmute_restriction.original_from),
from_type_size as usize,
from_type_size,
if from_type_size == 1 {""} else {"s"},
ty_to_string(ccx.tcx(), transmute_restriction.original_to),
to_type_size as usize,
to_type_size,
if to_type_size == 1 {""} else {"s"}));
}
}
@@ -351,7 +351,7 @@ fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
Br(bcx, loop_bcx.llbb, DebugLoc::None);

let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
&[C_uint(bcx.ccx(), 0usize)], &[bcx.llbb]);

let bcx = loop_bcx;

@@ -241,23 +241,24 @@ impl Type {
/// Return the number of elements in `self` if it is a LLVM vector type.
pub fn vector_length(&self) -> usize {
unsafe {
llvm::LLVMGetVectorSize(self.to_ref()) as usize
llvm::LLVMGetVectorSize(self.to_ref()).widen()
}
}

pub fn array_length(&self) -> usize {
unsafe {
llvm::LLVMGetArrayLength(self.to_ref()) as usize
llvm::LLVMGetArrayLength(self.to_ref()).widen()
}
}

pub fn field_types(&self) -> Vec<Type> {
unsafe {
let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref()) as usize;
let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref());
if n_elts == 0 {
return Vec::new();
}
let mut elts: Vec<_> = repeat(Type { rf: ptr::null_mut() }).take(n_elts).collect();
let mut elts: Vec<_> = repeat(Type { rf: ptr::null_mut() })
.take(n_elts.widen()).collect();
llvm::LLVMGetStructElementTypes(self.to_ref(),
elts.as_mut_ptr() as *mut TypeRef);
elts
@@ -270,8 +271,9 @@ impl Type {

pub fn func_params(&self) -> Vec<Type> {
unsafe {
let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize;
let mut args: Vec<_> = repeat(Type { rf: ptr::null_mut() }).take(n_args).collect();
let n_args = llvm::LLVMCountParamTypes(self.to_ref());
let mut args: Vec<_> = repeat(Type { rf: ptr::null_mut() })
.take(n_args.widen()).collect();
llvm::LLVMGetParamTypes(self.to_ref(),
args.as_mut_ptr() as *mut TypeRef);
args
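
Aside for reviewers: the hunks above lean heavily on `widen()`. As a minimal, self-contained sketch of a trait with that shape (the real `core::num::Widen` introduced by this patch may differ in details and impl coverage; the impls listed here are illustrative only), something like the following would support these call sites:

```rust
/// Hypothetical sketch of a lossless widening trait; the actual
/// `core::num::Widen` may differ.
pub trait Widen<T>: Sized {
    /// Value-preserving conversion into a wider (or equal-width) type.
    fn widen(self) -> T;
    /// Same conversion, with a dummy argument whose only job is to pin
    /// the target type when inference has no other anchor,
    /// e.g. `x.widen_(0usize)`.
    fn widen_(self, _witness: T) -> T;
}

macro_rules! impl_widen {
    ($($from:ty => $to:ty),*) => {$(
        impl Widen<$to> for $from {
            #[inline] fn widen(self) -> $to { self as $to }
            #[inline] fn widen_(self, _witness: $to) -> $to { self as $to }
        }
    )*}
}

// A few of the impls the call sites above rely on (illustrative).
impl_widen!(u8 => usize, u16 => usize, u32 => usize);

fn main() {
    let n_elts: u32 = 12;                // e.g. an LLVM element count
    let len: usize = n_elts.widen();     // target inferred from the binding
    let cap = n_elts.widen_(0usize) + 1; // target pinned by the dummy argument
    assert_eq!((len, cap), (12, 13));
}
```
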
@@ -1600,10 +1600,10 @@ pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>,
match r {
const_eval::const_int(i) =>
ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty),
Some(i as usize)),
Some(i.as_unsigned().truncate())),
const_eval::const_uint(i) =>
ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty),
Some(i as usize)),
Some(i.truncate())),
_ => {
span_err!(tcx.sess, ast_ty.span, E0249,
"expected constant integer expression \
@@ -4955,7 +4955,7 @@ pub fn check_bounds_are_used<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
match t.sty {
ty::ty_param(ParamTy {idx, ..}) => {
debug!("Found use of ty param num {}", idx);
tps_used[idx as usize] = true;
tps_used[idx.widen_(0usize)] = true;
}
_ => ()
}
@@ -1862,7 +1862,7 @@ fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>,
index: u32)
-> ty::TypeParameterDef<'tcx>
{
let param = &ast_generics.ty_params[index as usize];
let param = &ast_generics.ty_params[index.widen_(0usize)];

let tcx = ccx.tcx;
match tcx.ty_param_defs.borrow().get(&param.id) {
@@ -956,7 +956,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
}

ty::ty_param(ref data) => {
let def_id = generics.types.get(data.space, data.idx as usize).def_id;
let def_id = generics.types.get(data.space, data.idx.widen()).def_id;
assert_eq!(def_id.krate, ast::LOCAL_CRATE);
match self.terms_cx.inferred_map.get(&def_id.node) {
Some(&index) => {
@@ -1006,9 +1006,9 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
for p in type_param_defs {
let variance_decl =
self.declared_variance(p.def_id, def_id, TypeParam,
p.space, p.index as usize);
p.space, p.index.widen());
let variance_i = self.xform(variance, variance_decl);
let substs_ty = *substs.types.get(p.space, p.index as usize);
let substs_ty = *substs.types.get(p.space, p.index.widen());
debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
variance_decl, variance_i);
self.add_constraints_from_ty(generics, substs_ty, variance_i);
@@ -1017,9 +1017,9 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
for p in region_param_defs {
let variance_decl =
self.declared_variance(p.def_id, def_id,
RegionParam, p.space, p.index as usize);
RegionParam, p.space, p.index.widen());
let variance_i = self.xform(variance, variance_decl);
let substs_r = *substs.regions().get(p.space, p.index as usize);
let substs_r = *substs.regions().get(p.space, p.index.widen());
self.add_constraints_from_region(generics, substs_r, variance_i);
}
}
@@ -4360,6 +4360,7 @@ pub mod charwidth {
use core::option::Option::{Some, None};
use core::slice::SliceExt;
use core::result::Result::{Ok, Err};
use core::num::Widen;

fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
@@ -4382,7 +4383,7 @@ pub mod charwidth {
cu if cu < 0x20 => None, // control sequences have no width
cu if cu < 0x7F => Some(1), // ASCII
cu if cu < 0xA0 => None, // more control sequences
_ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as usize)
_ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table).widen())
}
}

@@ -22,6 +22,7 @@ use core::iter::Filter;
use core::mem;
use core::slice;
use core::str::Split;
use core::num::Widen;

use tables::grapheme::GraphemeCat;

@@ -391,7 +392,7 @@ static UTF8_CHAR_WIDTH: [u8; 256] = [
/// Given a first byte, determine how many bytes are in this UTF-8 character
#[inline]
pub fn utf8_char_width(b: u8) -> usize {
return UTF8_CHAR_WIDTH[b as usize] as usize;
UTF8_CHAR_WIDTH[b.widen_(0usize)].widen()
}

/// Determines if a vector of `u16` contains valid UTF-16
@@ -173,7 +173,7 @@ extern {
// hoedown_buffer helpers
impl hoedown_buffer {
fn as_bytes(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.data, self.size as usize) }
unsafe { slice::from_raw_parts(self.data, self.size.widen()) }
}
}

@@ -505,7 +505,7 @@ fn build_index(krate: &clean::Crate, cache: &mut Cache) -> io::Result<String> {
try!(write!(&mut w, ","));
}
try!(write!(&mut w, r#"[{},"{}","{}",{}"#,
item.ty as usize, item.name, path,
item.ty as usize, item.name, path, // enum-to-integer still needs `as`
item.desc.to_json().to_string()));
match item.parent {
Some(nodeid) => {
@@ -529,7 +529,7 @@ fn build_index(krate: &clean::Crate, cache: &mut Cache) -> io::Result<String> {
try!(write!(&mut w, ","));
}
try!(write!(&mut w, r#"[{},"{}"]"#,
short as usize, *fqp.last().unwrap()));
short as usize, *fqp.last().unwrap())); // enum-to-integer still needs `as`
}

try!(write!(&mut w, "]}};"));
@@ -42,8 +42,8 @@ impl ToHex for [u8] {
fn to_hex(&self) -> String {
let mut v = Vec::with_capacity(self.len() * 2);
for &byte in self {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
v.push(CHARS[(byte >> 4).widen_(0usize)]);
v.push(CHARS[(byte & 0xf).widen_(0usize)]);
}

unsafe {
@@ -186,18 +186,18 @@ mod tests {
#[test]
pub fn test_to_hex_all_bytes() {
for i in 0..256 {
assert_eq!([i as u8].to_hex(), format!("{:02x}", i as usize));
assert_eq!([i as u8].to_hex(), format!("{:02x}", i));
}
}

#[test]
pub fn test_from_hex_all_bytes() {
for i in 0..256 {
let ii: &[u8] = &[i as u8];
assert_eq!(format!("{:02x}", i as usize).from_hex()
assert_eq!(format!("{:02x}", i).from_hex()
.unwrap(),
ii);
assert_eq!(format!("{:02X}", i as usize).from_hex()
assert_eq!(format!("{:02X}", i).from_hex()
.unwrap(),
ii);
}
@@ -1316,7 +1316,7 @@ impl Stack {
InternalIndex(i) => StackElement::Index(i),
InternalKey(start, size) => {
StackElement::Key(str::from_utf8(
&self.str_buffer[start as usize .. start as usize + size as usize])
&self.str_buffer[start.widen() .. (start + size).widen()])
.unwrap())
}
}
@@ -1359,7 +1359,7 @@ impl Stack {
Some(&InternalIndex(i)) => Some(StackElement::Index(i)),
Some(&InternalKey(start, size)) => {
Some(StackElement::Key(str::from_utf8(
&self.str_buffer[start as usize .. (start+size) as usize]
&self.str_buffer[start.widen() .. (start+size).widen()]
).unwrap()))
}
}
@@ -1383,7 +1383,7 @@ impl Stack {
assert!(!self.is_empty());
match *self.stack.last().unwrap() {
InternalKey(_, sz) => {
let new_size = self.str_buffer.len() - sz as usize;
let new_size = self.str_buffer.len() - sz.widen_(0usize);
self.str_buffer.truncate(new_size);
}
InternalIndex(_) => {}
@@ -3930,15 +3930,15 @@ mod tests {
assert_eq!((vec![1_usize, 2_usize]).to_json(), array2);
assert_eq!(vec!(1_usize, 2_usize, 3_usize).to_json(), array3);
let mut tree_map = BTreeMap::new();
tree_map.insert("a".to_string(), 1 as usize);
tree_map.insert("a".to_string(), 1);
tree_map.insert("b".to_string(), 2);
assert_eq!(tree_map.to_json(), object);
let mut hash_map = HashMap::new();
hash_map.insert("a".to_string(), 1 as usize);
hash_map.insert("a".to_string(), 1);
hash_map.insert("b".to_string(), 2);
assert_eq!(hash_map.to_json(), object);
assert_eq!(Some(15).to_json(), I64(15));
assert_eq!(Some(15 as usize).to_json(), U64(15));
assert_eq!(Some(15_usize).to_json(), U64(15));
assert_eq!(None::<isize>.to_json(), Null);
}
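
One caveat these tests surface: once the cast is gone, an unsuffixed literal falls back to `i32`, whose `to_json` produces `I64`, not `U64`; the `_usize` suffixes above preserve the original behavior. A stand-alone sketch of the distinction (a stand-in enum, not rustc-serialize's real `Json`):

```rust
// Minimal stand-in for the two integer Json variants; rustc-serialize's
// real `Json` behaves analogously for this purpose.
#[derive(Debug, PartialEq)]
enum J { I64(i64), U64(u64) }

trait ToJ { fn to_j(&self) -> J; }
impl ToJ for i32   { fn to_j(&self) -> J { J::I64(*self as i64) } }
impl ToJ for usize { fn to_j(&self) -> J { J::U64(*self as u64) } }

fn main() {
    assert_eq!(15.to_j(), J::I64(15));       // unsuffixed: i32 fallback
    assert_eq!(15_usize.to_j(), J::U64(15)); // suffix selects the usize impl
}
```
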

@@ -266,9 +266,9 @@ impl AsciiExt for u8 {
#[inline]
fn is_ascii(&self) -> bool { *self & 128 == 0 }
#[inline]
fn to_ascii_uppercase(&self) -> u8 { ASCII_UPPERCASE_MAP[*self as usize] }
fn to_ascii_uppercase(&self) -> u8 { ASCII_UPPERCASE_MAP[self.widen_(0usize)] }
#[inline]
fn to_ascii_lowercase(&self) -> u8 { ASCII_LOWERCASE_MAP[*self as usize] }
fn to_ascii_lowercase(&self) -> u8 { ASCII_LOWERCASE_MAP[self.widen_(0usize)] }
#[inline]
fn eq_ignore_ascii_case(&self, other: &u8) -> bool {
self.to_ascii_lowercase() == other.to_ascii_lowercase()
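
Note that `*self as usize` becomes `self.widen_(0usize)` with no explicit deref: method resolution auto-derefs the `&u8` receiver. A minimal sketch of that detail (trait shape assumed, as before):

```rust
pub trait Widen<T> {
    fn widen_(self, _witness: T) -> T;
}

impl Widen<usize> for u8 {
    fn widen_(self, _witness: usize) -> usize { self as usize }
}

// `byte` is `&u8`; the method call auto-derefs, so no `*byte` is needed.
fn lookup(table: &[char; 256], byte: &u8) -> char {
    table[byte.widen_(0usize)]
}

fn main() {
    let mut table = ['\0'; 256];
    table[b'a' as usize] = 'A';
    assert_eq!(lookup(&table, &b'a'), 'A');
}
```
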
@@ -12,6 +12,7 @@ use self::Entry::*;
use self::SearchResult::*;
use self::VacantEntryState::*;

use core::num::ConvertSign;
use borrow::Borrow;
use clone::Clone;
use cmp::{max, Eq, PartialEq};
@@ -807,7 +808,7 @@ impl<K, V, S> HashMap<K, V, S>

if (ib as isize) < robin_ib {
// Found a luckier bucket than me. Better steal his spot.
return robin_hood(bucket, robin_ib as usize, hash, k, v);
return robin_hood(bucket, robin_ib.as_unsigned(), hash, k, v);
}

probe = bucket.next();
@@ -1191,7 +1192,7 @@ fn search_entry_hashed<'a, K: Eq, V>(table: &'a mut RawTable<K,V>, hash: SafeHas
return Vacant(VacantEntry {
hash: hash,
key: k,
elem: NeqElem(bucket, robin_ib as usize),
elem: NeqElem(bucket, robin_ib.as_unsigned()),
});
}

@@ -24,6 +24,7 @@ use option::Option::{Some, None};
use ptr::{self, Unique};
use rt::heap::{allocate, deallocate, EMPTY};
use collections::hash_state::HashState;
use core::num::Truncate;

const EMPTY_BUCKET: u64 = 0;

@@ -224,7 +225,7 @@ impl<K, V, M> Bucket<K, V, M> {

impl<K, V, M: Deref<Target=RawTable<K, V>>> Bucket<K, V, M> {
pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
Bucket::at_index(table, hash.inspect() as usize)
Bucket::at_index(table, hash.inspect().truncate())
}

pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
@@ -378,7 +379,7 @@ impl<K, V, M: Deref<Target=RawTable<K, V>>> FullBucket<K, V, M> {
// Calculates the distance one has to travel when going from
// `hash mod capacity` onwards to `idx mod capacity`, wrapping around
// if the destination is not reached before the end of the table.
(self.idx.wrapping_sub(self.hash().inspect() as usize)) & (self.table.capacity() - 1)
(self.idx.wrapping_sub(self.hash().inspect().truncate())) & (self.table.capacity() - 1)
}

#[inline]
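
The `distance` computation above narrows a 64-bit hash to a table index. As a hedged sketch of a narrowing trait with the shape these call sites assume (the real `core::num::Truncate` may differ), the point of the rename is that the lossy step becomes visible:

```rust
/// Hypothetical sketch: explicit lossy narrowing, equivalent to `as`
/// but with a name that flags the truncation at the call site.
pub trait Truncate<T>: Sized {
    fn truncate(self) -> T;
    /// Dummy-argument form for pinning the target type.
    fn truncate_(self, _witness: T) -> T;
}

impl Truncate<usize> for u64 {
    #[inline] fn truncate(self) -> usize { self as usize }
    #[inline] fn truncate_(self, _witness: usize) -> usize { self as usize }
}

fn main() {
    let hash: u64 = 0xdead_beef_dead_beef;
    let capacity: usize = 1 << 10;
    // Masking keeps the index in range, so the truncation is harmless
    // here; the explicit name just makes the lossy step readable.
    let index = hash.truncate() & (capacity - 1);
    assert!(index < capacity);
}
```
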
@@ -10,6 +10,7 @@

#![unstable(feature = "std_misc")]

use core::num::Widen;
use borrow::Cow;
use convert::{Into, From};
use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
@@ -306,7 +307,7 @@ impl CStr {
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_ptr<'a>(ptr: *const libc::c_char) -> &'a CStr {
let len = libc::strlen(ptr);
mem::transmute(slice::from_raw_parts(ptr, len as usize + 1))
mem::transmute(slice::from_raw_parts(ptr, len.widen_(0usize) + 1))
}

/// Returns the inner pointer to this C string.
@@ -1673,8 +1673,8 @@ mod tests {
let stem = f.file_stem().unwrap().to_str().unwrap();
let root = stem.as_bytes()[0] - b'0';
let name = stem.as_bytes()[1] - b'0';
assert!(cur[root as usize] < name);
cur[root as usize] = name;
assert!(cur[root.widen_(0usize)] < name);
cur[root.widen_(0usize)] = name;
}

check!(fs::remove_dir_all(dir));
@@ -112,7 +112,7 @@ macro_rules! buffer {
() => {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
let amt = cmp::min(self.pos, self.inner.len() as u64);
Ok(&self.inner[(amt as usize)..])
Ok(&self.inner[amt.truncate()..])
}
fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}
@@ -129,7 +129,7 @@ impl<'a> BufRead for Cursor<Vec<u8>> { buffer!(); }
impl<'a> Write for Cursor<&'a mut [u8]> {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
let pos = cmp::min(self.pos, self.inner.len() as u64);
let amt = try!((&mut self.inner[(pos as usize)..]).write(data));
let amt = try!((&mut self.inner[pos.truncate()..]).write(data));
self.pos += amt as u64;
Ok(amt)
}
@@ -143,13 +143,13 @@ impl Write for Cursor<Vec<u8>> {
// currently are
let pos = self.position();
let amt = pos.saturating_sub(self.inner.len() as u64);
self.inner.extend(repeat(0).take(amt as usize));
self.inner.extend(repeat(0).take(amt.truncate()));

// Figure out what bytes will be used to overwrite what's currently
// there (left), and what will be appended on the end (right)
let space = self.inner.len() - pos as usize;
let space = self.inner.len() - pos.truncate_(0usize);
let (left, right) = buf.split_at(cmp::min(space, buf.len()));
slice::bytes::copy_memory(left, &mut self.inner[(pos as usize)..]);
slice::bytes::copy_memory(left, &mut self.inner[pos.truncate()..]);
self.inner.push_all(right);

// Bump us forward
@@ -25,6 +25,7 @@ use result;
use string::String;
use str;
use vec::Vec;
use core::num::Truncate;

pub use self::buffered::{BufReader, BufWriter, BufStream, LineWriter};
pub use self::buffered::IntoInnerError;
@@ -698,7 +699,7 @@ impl<T: Read> Read for Take<T> {
return Ok(0);
}

let max = cmp::min(buf.len() as u64, self.limit) as usize;
let max = cmp::min(buf.len() as u64, self.limit).truncate();
let n = try!(self.inner.read(&mut buf[..max]));
self.limit -= n as u64;
Ok(n)
@@ -709,13 +710,13 @@ impl<T: Read> Read for Take<T> {
impl<T: BufRead> BufRead for Take<T> {
fn fill_buf(&mut self) -> Result<&[u8]> {
let buf = try!(self.inner.fill_buf());
let cap = cmp::min(buf.len() as u64, self.limit) as usize;
let cap = cmp::min(buf.len() as u64, self.limit).truncate();
Ok(&buf[..cap])
}

fn consume(&mut self, amt: usize) {
// Don't let callers reset the limit by passing an overlarge value
let amt = cmp::min(amt as u64, self.limit) as usize;
let amt = cmp::min(amt as u64, self.limit).truncate();
self.limit -= amt as u64;
self.inner.consume(amt);
}
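
The `cmp::min(buf.len() as u64, self.limit).truncate()` pattern in this hunk is lossless in practice: the minimum is bounded by `buf.len()`, which already fits in `usize`. A small sketch of that reasoning, with plain casts standing in for the assumed trait methods:

```rust
use std::cmp;

/// Clamp a u64 limit to a buffer length. The result is <= buf_len,
/// which fits in usize, so the narrowing cast cannot lose bits.
fn clamped(buf_len: usize, limit: u64) -> usize {
    cmp::min(buf_len as u64, limit) as usize // `.truncate()` in the patch
}

fn main() {
    assert_eq!(clamped(16, 1 << 40), 16); // limit larger than the buffer
    assert_eq!(clamped(16, 5), 5);        // limit smaller than the buffer
}
```
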
@@ -19,6 +19,7 @@
pub use core::num::{Zero, One};
pub use core::num::{FpCategory, ParseIntError, ParseFloatError};
pub use core::num::{wrapping, Wrapping};
pub use core::num::{Widen, Truncate, ConvertSign};

#[cfg(test)] use cmp::PartialEq;
#[cfg(test)] use fmt;
@@ -49,3 +49,5 @@
#[doc(no_inline)] pub use string::{String, ToString};
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)] pub use vec::Vec;

pub use num::{Widen, Truncate, ConvertSign};
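
With all three traits re-exported from the prelude, a call site picks by direction and signedness. A hedged cheat-sheet, with plain casts standing in for the assumed methods:

```rust
// widen():       lossless, narrow -> wide       (e.g. u32 -> usize)
// truncate():    lossy,    wide   -> narrow     (e.g. u64 -> usize)
// as_unsigned(): same width, signed -> unsigned (e.g. i32 -> u32)
// The `_`-suffixed variants (`widen_(0usize)`) only pin the target type.
fn pick_conversion(small: u32, big: u64, signed: i32) -> (usize, usize, u32) {
    (
        small as usize, // widen()
        big as usize,   // truncate()
        signed as u32,  // as_unsigned()
    )
}

fn main() {
    assert_eq!(pick_conversion(7, 1 << 40, -1),
               (7, (1u64 << 40) as usize, 0xffff_ffff));
}
```
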
@@ -72,7 +72,7 @@ mod imp {
panic!("unexpected getrandom error: {}", err);
}
} else {
read += result as usize;
read += result.as_unsigned().widen_(0usize);
}
}
}
@@ -44,7 +44,7 @@ pub fn getsockopt<T: Copy>(sock: &Socket, opt: c_int,
try!(cvt(c::getsockopt(*sock.as_inner(), opt, val,
&mut slot as *mut _ as *mut _,
&mut len)));
assert_eq!(len as usize, mem::size_of::<T>());
assert_eq!(len.as_unsigned().widen_(0usize), mem::size_of::<T>());
Ok(slot)
}
}
@@ -56,21 +56,21 @@ fn sockname<F>(f: F) -> io::Result<SocketAddr>
let mut storage: libc::sockaddr_storage = mem::zeroed();
let mut len = mem::size_of_val(&storage) as socklen_t;
try!(cvt(f(&mut storage as *mut _ as *mut _, &mut len)));
sockaddr_to_addr(&storage, len as usize)
sockaddr_to_addr(&storage, len.as_unsigned().widen())
}
}

fn sockaddr_to_addr(storage: &libc::sockaddr_storage,
len: usize) -> io::Result<SocketAddr> {
match storage.ss_family as libc::c_int {
libc::AF_INET => {
assert!(len as usize >= mem::size_of::<libc::sockaddr_in>());
assert!(len >= mem::size_of::<libc::sockaddr_in>());
Ok(SocketAddr::V4(FromInner::from_inner(unsafe {
*(storage as *const _ as *const libc::sockaddr_in)
})))
}
libc::AF_INET6 => {
assert!(len as usize >= mem::size_of::<libc::sockaddr_in6>());
assert!(len >= mem::size_of::<libc::sockaddr_in6>());
Ok(SocketAddr::V6(FromInner::from_inner(unsafe {
*(storage as *const _ as *const libc::sockaddr_in6)
})))
@@ -103,7 +103,7 @@ impl Iterator for LookupHost {
unsafe {
if self.cur.is_null() { return None }
let ret = sockaddr_to_addr(mem::transmute((*self.cur).ai_addr),
(*self.cur).ai_addrlen as usize);
(*self.cur).ai_addrlen.widen());
self.cur = (*self.cur).ai_next as *mut libc::addrinfo;
Some(ret)
}
@@ -247,7 +247,7 @@ impl TcpStream {
buf.len() as wrlen_t,
0)
}));
Ok(ret as usize)
Ok(ret.as_unsigned().widen())
}

pub fn peer_addr(&self) -> io::Result<SocketAddr> {
@@ -347,7 +347,7 @@ impl TcpListener {
let mut len = mem::size_of_val(&storage) as socklen_t;
let sock = try!(self.inner.accept(&mut storage as *mut _ as *mut _,
&mut len));
let addr = try!(sockaddr_to_addr(&storage, len as usize));
let addr = try!(sockaddr_to_addr(&storage, len.as_unsigned().widen()));
Ok((TcpStream { inner: sock, }, addr))
}

@@ -412,7 +412,8 @@ impl UdpSocket {
buf.len() as wrlen_t, 0,
&mut storage as *mut _ as *mut _, &mut addrlen)
}));
Ok((n as usize, try!(sockaddr_to_addr(&storage, addrlen as usize))))
Ok((n.as_unsigned().widen(),
try!(sockaddr_to_addr(&storage, addrlen.as_unsigned().widen()))))
}

pub fn send_to(&self, buf: &[u8], dst: &SocketAddr) -> io::Result<usize> {
@@ -422,7 +423,7 @@ impl UdpSocket {
buf.as_ptr() as *const c_void, buf.len() as wrlen_t,
0, dstp, dstlen)
}));
Ok(ret as usize)
Ok(ret.as_unsigned().widen())
}

pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
@@ -282,7 +282,7 @@ pub unsafe fn get_sp_limit() -> usize {
#[inline(always)]
unsafe fn target_get_sp_limit() -> usize {
use libc::c_void;
return get_sp_limit() as usize;
return get_sp_limit() as usize; // pointer-to-integer cast: `as` is still needed
extern {
fn get_sp_limit() -> *const c_void;
}
@@ -188,9 +188,9 @@ impl StaticKey {
key2
};
assert!(key != 0);
match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) {
match self.key.compare_and_swap(0, key.widen(), Ordering::SeqCst) {
// The CAS succeeded, so we've created the actual key
0 => key as usize,
0 => key.widen(),
// If someone beat us to the punch, use their key instead
n => { imp::destroy(key); n }
}
@@ -124,7 +124,7 @@ pub fn write(w: &mut Write) -> io::Result<()> {
// 100 lines should be enough
const SIZE: usize = 100;
let mut buf: [*mut libc::c_void; SIZE] = unsafe {mem::zeroed()};
let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) as usize};
let cnt = unsafe { backtrace(buf.as_mut_ptr(), SIZE as libc::c_int) }
    .as_unsigned().widen_(0usize);

// skipping the first one as it is write itself
let iter = (1..cnt).map(|i| {
@@ -175,7 +175,7 @@ mod select {
}

pub fn fd_set(set: &mut fd_set, fd: i32) {
set.fds_bits[(fd / 32) as usize] |= 1 << ((fd % 32) as usize);
set.fds_bits[(fd / 32).as_unsigned().widen_(0usize)] |=
    1 << ((fd % 32).as_unsigned().widen_(0usize));
}
}

@@ -188,6 +188,7 @@ mod select {
mod select {
use usize;
use libc;
use core::num::{ConvertSign, Widen};

pub const FD_SETSIZE: usize = 1024;

@@ -198,7 +199,7 @@ mod select {
}

pub fn fd_set(set: &mut fd_set, fd: i32) {
let fd = fd as usize;
let fd = fd.as_unsigned().widen_(0usize);
set.fds_bits[fd / usize::BITS] |= 1 << (fd % usize::BITS);
}
}
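
`fd` is an `i32`, so it crosses to the unsigned side first and only then widens. A minimal sketch of a same-width sign-conversion trait of that shape (assumed; the real `core::num::ConvertSign` may differ, e.g. it may also offer `as_signed`):

```rust
/// Hypothetical sketch of a same-width sign-conversion trait.
pub trait ConvertSign {
    type Unsigned;
    /// Reinterpret as the unsigned type of the same width.
    fn as_unsigned(self) -> Self::Unsigned;
}

impl ConvertSign for i32 {
    type Unsigned = u32;
    #[inline] fn as_unsigned(self) -> u32 { self as u32 }
}

fn main() {
    let fd: i32 = 42;
    // Sign conversion first, then a widening cast -- mirroring the
    // `fd.as_unsigned().widen_(0usize)` chain used above.
    let idx = fd.as_unsigned() as usize;
    assert_eq!(idx, 42);
}
```
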
@@ -16,6 +16,7 @@ use mem;
use sys::c;
use sys::cvt;
use sys_common::AsInner;
use core::num::{ConvertSign, Widen};

pub struct FileDesc {
fd: c_int,
@@ -41,7 +42,7 @@ impl FileDesc {
buf.as_mut_ptr() as *mut c_void,
buf.len() as size_t)
}));
Ok(ret as usize)
Ok(ret.as_unsigned().widen())
}

pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
@@ -50,7 +51,7 @@ impl FileDesc {
buf.as_ptr() as *const c_void,
buf.len() as size_t)
}));
Ok(ret as usize)
Ok(ret.as_unsigned().widen())
}

pub fn set_cloexec(&self) {
@@ -25,6 +25,7 @@ use sys::platform::raw;
use sys::{c, cvt, cvt_r};
use sys_common::{AsInner, FromInner};
use vec::Vec;
use core::num::{ConvertSign, Widen};

pub struct File(FileDesc);

@@ -140,7 +141,7 @@ impl Iterator for ReadDir {
}

let mut buf: Vec<u8> = Vec::with_capacity(unsafe {
rust_dirent_t_size() as usize
rust_dirent_t_size().as_unsigned().widen()
});
let ptr = buf.as_mut_ptr() as *mut libc::dirent_t;

@@ -465,12 +466,12 @@ pub fn readlink(p: &Path) -> io::Result<PathBuf> {
if len < 0 {
len = 1024; // FIXME: read PATH_MAX from C ffi?
}
let mut buf: Vec<u8> = Vec::with_capacity(len as usize);
let mut buf: Vec<u8> = Vec::with_capacity(len.as_unsigned().widen());
unsafe {
let n = try!(cvt({
libc::readlink(p, buf.as_ptr() as *mut c_char, len as size_t)
}));
buf.set_len(n as usize);
buf.set_len(n.as_unsigned().widen());
}
Ok(PathBuf::from(OsString::from_vec(buf)))
}