[InstCombine] vector_reduce_umin(?ext(<n x i1>)) --> `?ext(vector_reduce_and(<n x i1>))` (PR51259)

Alive2 agrees:
https://alive2.llvm.org/ce/z/XxUScW (self)
https://alive2.llvm.org/ce/z/3usTF- (zext)
https://alive2.llvm.org/ce/z/GVxwQz (sext)

We already handle `vector_reduce_and(<n x i1>)`,
so let's just combine into the already-handled pattern
and let the existing fold do the rest.
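
A minimal IR sketch of the fold (value names illustrative; this is the
zext flavour from the tests below, the sext and self cases are analogous):

  %zext = zext <8 x i1> %x to <8 x i64>
  %res  = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %zext)
    -->
  %red = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %x)
  %res = zext i1 %red to i64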
LebedevRI committed Aug 2, 2021
1 parent: 7888cfe · commit: 0c13798
Showing 2 changed files with 54 additions and 23 deletions.
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp (23 additions, 1 deletion)
@@ -2067,8 +2067,30 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     }
     LLVM_FALLTHROUGH;
   }
+  case Intrinsic::vector_reduce_umin: {
+    if (IID == Intrinsic::vector_reduce_umin) {
+      // UMin reduction over the vector with (potentially-extended)
+      // i1 element type is actually a (potentially-extended)
+      // logical `and` reduction over the original non-extended value:
+      //   vector_reduce_umin(?ext(<n x i1>))
+      //   -->
+      //   ?ext(vector_reduce_and(<n x i1>))
+      Value *Arg = II->getArgOperand(0);
+      Value *Vect;
+      if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
+        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
+          if (FTy->getElementType() == Builder.getInt1Ty()) {
+            Value *Res = Builder.CreateAndReduce(Vect);
+            if (Arg != Vect)
+              Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
+                                       II->getType());
+            return replaceInstUsesWith(CI, Res);
+          }
+      }
+    }
+    LLVM_FALLTHROUGH;
+  }
   case Intrinsic::vector_reduce_umax:
-  case Intrinsic::vector_reduce_umin:
   case Intrinsic::vector_reduce_smax:
   case Intrinsic::vector_reduce_smin:
   case Intrinsic::vector_reduce_fmax:
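
For context, the pre-existing `vector_reduce_and(<n x i1>)` fold that this
combine feeds into lowers the i1 reduction to a bitcast plus an all-ones
compare, which is why the updated CHECK lines below match `bitcast` and
`icmp eq ... -1` rather than a reduction call. Roughly (value names
illustrative):

  %red = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %x)
    -->
  %mask = bitcast <8 x i1> %x to i8
  %red  = icmp eq i8 %mask, -1 ; true iff all lanes are set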
llvm/test/Transforms/InstCombine/reduction-umin-sext-zext-i1.ll (31 additions, 22 deletions)
@@ -3,18 +3,20 @@
 
 define i1 @reduce_umin_self(<8 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_self(
-; CHECK-NEXT:    [[RES:%.*]] = call i1 @llvm.vector.reduce.umin.v8i1(<8 x i1> [[X:%.*]])
-; CHECK-NEXT:    ret i1 [[RES]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], -1
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %res = call i1 @llvm.vector.reduce.umin.v8i32(<8 x i1> %x)
   ret i1 %res
 }
 
 define i32 @reduce_umin_sext(<4 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_sext(
-; CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> [[X:%.*]] to <4 x i32>
-; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[SEXT]])
-; CHECK-NEXT:    ret i32 [[RES]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i1> [[X:%.*]] to i4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i4 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %sext = sext <4 x i1> %x to <4 x i32>
   %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %sext)
@@ -23,9 +25,10 @@ define i32 @reduce_umin_sext(<4 x i1> %x) {
 
 define i64 @reduce_umin_zext(<8 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_zext(
-; CHECK-NEXT:    [[ZEXT:%.*]] = zext <8 x i1> [[X:%.*]] to <8 x i64>
-; CHECK-NEXT:    [[RES:%.*]] = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> [[ZEXT]])
-; CHECK-NEXT:    ret i64 [[RES]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
+; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %zext = zext <8 x i1> %x to <8 x i64>
   %res = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %zext)
@@ -34,9 +37,10 @@ define i64 @reduce_umin_zext(<8 x i1> %x) {
 
 define i16 @reduce_umin_sext_same(<16 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_sext_same(
-; CHECK-NEXT:    [[SEXT:%.*]] = sext <16 x i1> [[X:%.*]] to <16 x i16>
-; CHECK-NEXT:    [[RES:%.*]] = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> [[SEXT]])
-; CHECK-NEXT:    ret i16 [[RES]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i1> [[X:%.*]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i16 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i1 [[TMP2]] to i16
+; CHECK-NEXT:    ret i16 [[TMP3]]
 ;
   %sext = sext <16 x i1> %x to <16 x i16>
   %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %sext)
@@ -45,9 +49,10 @@ define i16 @reduce_umin_sext_same(<16 x i1> %x) {
 
 define i8 @reduce_umin_zext_long(<128 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_zext_long(
-; CHECK-NEXT:    [[SEXT:%.*]] = sext <128 x i1> [[X:%.*]] to <128 x i8>
-; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> [[SEXT]])
-; CHECK-NEXT:    ret i8 [[RES]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i128 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
+; CHECK-NEXT:    ret i8 [[TMP3]]
 ;
   %sext = sext <128 x i1> %x to <128 x i8>
   %res = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> %sext)
@@ -57,11 +62,13 @@ define i8 @reduce_umin_zext_long(<128 x i1> %x) {
 @glob = external global i8, align 1
 define i8 @reduce_umin_zext_long_external_use(<128 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_zext_long_external_use(
-; CHECK-NEXT:    [[SEXT:%.*]] = sext <128 x i1> [[X:%.*]] to <128 x i8>
-; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> [[SEXT]])
-; CHECK-NEXT:    [[EXT:%.*]] = extractelement <128 x i8> [[SEXT]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i128 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i1 [[TMP2]] to i8
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i32 0
+; CHECK-NEXT:    [[EXT:%.*]] = sext i1 [[TMP4]] to i8
 ; CHECK-NEXT:    store i8 [[EXT]], i8* @glob, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
+; CHECK-NEXT:    ret i8 [[TMP3]]
 ;
   %sext = sext <128 x i1> %x to <128 x i8>
   %res = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> %sext)
@@ -73,11 +80,13 @@ define i8 @reduce_umin_zext_long_external_use(<128 x i1> %x) {
 @glob1 = external global i64, align 8
 define i64 @reduce_umin_zext_external_use(<8 x i1> %x) {
 ; CHECK-LABEL: @reduce_umin_zext_external_use(
-; CHECK-NEXT:    [[ZEXT:%.*]] = zext <8 x i1> [[X:%.*]] to <8 x i64>
-; CHECK-NEXT:    [[RES:%.*]] = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> [[ZEXT]])
-; CHECK-NEXT:    [[EXT:%.*]] = extractelement <8 x i64> [[ZEXT]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i32 0
+; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[TMP4]] to i64
 ; CHECK-NEXT:    store i64 [[EXT]], i64* @glob1, align 8
-; CHECK-NEXT:    ret i64 [[RES]]
+; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %zext = zext <8 x i1> %x to <8 x i64>
   %res = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %zext)