Auto merge of #58003 - nikic:saturating-add, r=nagisa

Use LLVM intrinsics for saturating add/sub

Use the `[su](add|sub).sat` LLVM intrinsics when compiling against LLVM 8 or newer, as they should optimize and codegen better than IR based on `[su](add|sub).with.overflow`.
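
For a sense of the intended effect, a function like the hypothetical one below should now lower to a single `llvm.uadd.sat.i32` call on LLVM 8+ instead of an `llvm.uadd.with.overflow.i32` plus control flow (a sketch of the expectation, not a codegen test from this PR):

```rust
// Expected to compile down to one `llvm.uadd.sat.i32` call on LLVM 8+.
pub fn sat_u32(a: u32, b: u32) -> u32 {
    a.saturating_add(b)
}
```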

For the fallback on LLVM < 8 I'm using the same expansion that LLVM's target lowering uses, which is not the same as what Rust currently emits (in particular, it uses selects rather than branches).
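
Roughly, that select-based expansion looks like the following hand-written sketch for `i32` (illustrative only; the real lowering happens in codegen, where both `if`s below become `select`s):

```rust
// Select-style fallback for signed saturating add on LLVM < 8.
fn saturating_add_i32(a: i32, b: i32) -> i32 {
    let (val, overflow) = a.overflowing_add(b);
    // A negative wrapped result means the addition overflowed towards +infinity.
    let limit = if val < 0 { i32::max_value() } else { i32::min_value() };
    if overflow { limit } else { val }
}
```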

Fixes #55286.
Fixes #52203.
Fixes #44500.

r? @nagisa
bors committed Jan 31, 2019
2 parents d30b99f + 4a4186e commit 8a0e5faec7f62e3cfd88d6625ce213d93b061305
@@ -1493,6 +1493,19 @@ extern "rust-intrinsic" {
/// [`std::u32::wrapping_mul`](../../std/primitive.u32.html#method.wrapping_mul)
pub fn overflowing_mul<T>(a: T, b: T) -> T;

/// Computes `a + b`, while saturating at numeric bounds.
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `saturating_add` method. For example,
/// [`std::u32::saturating_add`](../../std/primitive.u32.html#method.saturating_add)
#[cfg(not(stage0))]
pub fn saturating_add<T>(a: T, b: T) -> T;
/// Computes `a - b`, while saturating at numeric bounds.
/// The stabilized versions of this intrinsic are available on the integer
/// primitives via the `saturating_sub` method. For example,
/// [`std::u32::saturating_sub`](../../std/primitive.u32.html#method.saturating_sub)
#[cfg(not(stage0))]
pub fn saturating_sub<T>(a: T, b: T) -> T;

/// Returns the value of the discriminant for the variant in 'v',
/// cast to a `u64`; if `T` has no discriminant, returns 0.
pub fn discriminant_value<T>(v: &T) -> u64;
@@ -883,11 +883,16 @@ $EndFeature, "
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn saturating_add(self, rhs: Self) -> Self {
#[cfg(stage0)]
match self.checked_add(rhs) {
Some(x) => x,
None if rhs >= 0 => Self::max_value(),
None => Self::min_value(),
}
#[cfg(not(stage0))]
{
intrinsics::saturating_add(self, rhs)
}
}
}

@@ -908,11 +913,16 @@ $EndFeature, "
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn saturating_sub(self, rhs: Self) -> Self {
#[cfg(stage0)]
match self.checked_sub(rhs) {
Some(x) => x,
None if rhs >= 0 => Self::min_value(),
None => Self::max_value(),
}
#[cfg(not(stage0))]
{
intrinsics::saturating_sub(self, rhs)
}
}
}

@@ -2744,10 +2754,15 @@ assert_eq!(200u8.saturating_add(127), 255);", $EndFeature, "
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn saturating_add(self, rhs: Self) -> Self {
#[cfg(stage0)]
match self.checked_add(rhs) {
Some(x) => x,
None => Self::max_value(),
}
#[cfg(not(stage0))]
{
intrinsics::saturating_add(self, rhs)
}
}
}

@@ -2766,10 +2781,15 @@ assert_eq!(13", stringify!($SelfT), ".saturating_sub(127), 0);", $EndFeature, "
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn saturating_sub(self, rhs: Self) -> Self {
#[cfg(stage0)]
match self.checked_sub(rhs) {
Some(x) => x,
None => Self::min_value(),
}
#[cfg(not(stage0))]
{
intrinsics::saturating_sub(self, rhs)
}
}
}
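
The behaviour of the four wrappers is unchanged; only the implementation path differs. A minimal sanity check (a hypothetical standalone snippet, not part of this PR's test suite), reusing the bounds from the doc examples above:

```rust
fn main() {
    // Signed: clamp at the type's minimum/maximum.
    assert_eq!(i8::max_value().saturating_add(1), i8::max_value());
    assert_eq!(i8::min_value().saturating_sub(1), i8::min_value());
    // Unsigned: clamp at the maximum and at zero.
    assert_eq!(200u8.saturating_add(127), 255);
    assert_eq!(13u8.saturating_sub(127), 0);
}
```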

@@ -757,6 +757,30 @@ impl CodegenCx<'b, 'tcx> {
ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1});
ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1});

ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);

ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);

ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);

ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void);
ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);

@@ -14,7 +14,7 @@ use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, Primitive};
-use rustc_codegen_ssa::common::TypeKind;
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc::hir;
use syntax::ast::{self, FloatTy};
use syntax::symbol::Symbol;
@@ -28,7 +28,7 @@ use rustc::session::Session;
use syntax_pos::Span;

use std::cmp::Ordering;
-use std::iter;
+use std::{iter, i128, u128};

fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
let llvm_name = match name {
@@ -342,7 +342,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
"bitreverse" | "add_with_overflow" | "sub_with_overflow" |
"mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
"unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
"rotate_left" | "rotate_right" => {
"rotate_left" | "rotate_right" | "saturating_add" | "saturating_sub" => {
let ty = arg_tys[0];
match int_type_width_signed(ty, self) {
Some((width, signed)) =>
@@ -468,6 +468,44 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
self.or(shift1, shift2)
}
},
"saturating_add" | "saturating_sub" => {
let is_add = name == "saturating_add";
let lhs = args[0].immediate();
let rhs = args[1].immediate();
if llvm_util::get_major_version() >= 8 {
let llvm_name = &format!("llvm.{}{}.sat.i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
width);
let llfn = self.get_intrinsic(llvm_name);
self.call(llfn, &[lhs, rhs], None)
} else {
let llvm_name = &format!("llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
width);
let llfn = self.get_intrinsic(llvm_name);
let pair = self.call(llfn, &[lhs, rhs], None);
let val = self.extract_value(pair, 0);
let overflow = self.extract_value(pair, 1);
let llty = self.type_ix(width);

let limit = if signed {
let limit_lo = self.const_uint_big(
llty, (i128::MIN >> (128 - width)) as u128);
let limit_hi = self.const_uint_big(
llty, (i128::MAX >> (128 - width)) as u128);
let neg = self.icmp(
IntPredicate::IntSLT, val, self.const_uint(llty, 0));
self.select(neg, limit_hi, limit_lo)
} else if is_add {
self.const_uint_big(llty, u128::MAX >> (128 - width))
} else {
self.const_uint(llty, 0)
};
self.select(overflow, limit, val)
}
},
_ => bug!(),
},
None => {
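
The signed limits in the fallback are built by shifting `i128` constants so one expression covers every width. A quick standalone check of that bit trick for `width = 8` (a hypothetical snippet mirroring the expressions above, with the `as u8` cast standing in for the truncation `const_uint_big` performs):

```rust
fn main() {
    let width = 8;
    // Saturation value for negative overflow: the type's MIN (0x80 for i8).
    let limit_lo = (i128::min_value() >> (128 - width)) as u128 as u8;
    // Saturation value for positive overflow: the type's MAX (0x7f for i8).
    let limit_hi = (i128::max_value() >> (128 - width)) as u128 as u8;
    assert_eq!(limit_lo as i8, i8::min_value());
    assert_eq!(limit_hi as i8, i8::max_value());
}
```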
@@ -68,6 +68,7 @@ pub fn intrisic_operation_unsafety(intrinsic: &str) -> hir::Unsafety {
"size_of" | "min_align_of" | "needs_drop" |
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" |
"overflowing_add" | "overflowing_sub" | "overflowing_mul" |
"saturating_add" | "saturating_sub" |
"rotate_left" | "rotate_right" |
"ctpop" | "ctlz" | "cttz" | "bswap" | "bitreverse"
=> hir::Unsafety::Normal,
@@ -307,6 +308,8 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,

"overflowing_add" | "overflowing_sub" | "overflowing_mul" =>
(1, vec![param(0), param(0)], param(0)),
"saturating_add" | "saturating_sub" =>
(1, vec![param(0), param(0)], param(0)),
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" =>
(1, vec![param(0), param(0)], param(0)),
