From a8649a299c1e00ac004c51faa8fc4a6e30666998 Mon Sep 17 00:00:00 2001 From: Florian Hahn Date: Sat, 6 Sep 2025 20:56:53 +0100 Subject: [PATCH 1/3] [InstSimplify] Add tests with extractvalue (umul_with_overflow) --- .../InstSimplify/fold-intrinsics.ll | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll index 45f5e3768725f..f5689b7fd6d2c 100644 --- a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll +++ b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll @@ -610,3 +610,75 @@ define void @umul_fix_sat_poison(ptr %P) { ret void } + +declare void @use.i32(i32, i1) + +define void @umul_extractvalue(ptr %P, i32 %x) { +; CHECK-LABEL: @umul_extractvalue( +; CHECK-NEXT: [[UMUL_1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 1) +; CHECK-NEXT: [[R_1:%.*]] = extractvalue { i32, i1 } [[UMUL_1]], 0 +; CHECK-NEXT: [[OV_1:%.*]] = extractvalue { i32, i1 } [[UMUL_1]], 1 +; CHECK-NEXT: call void @use.i32(i32 [[R_1]], i1 [[OV_1]]) +; CHECK-NEXT: [[UMUL_2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[X]]) +; CHECK-NEXT: [[R_2:%.*]] = extractvalue { i32, i1 } [[UMUL_2]], 0 +; CHECK-NEXT: [[OV_2:%.*]] = extractvalue { i32, i1 } [[UMUL_2]], 1 +; CHECK-NEXT: call void @use.i32(i32 [[R_2]], i1 [[OV_2]]) +; CHECK-NEXT: [[UMUL_3:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[X]]) +; CHECK-NEXT: [[R_3:%.*]] = extractvalue { i32, i1 } [[UMUL_3]], 0 +; CHECK-NEXT: [[OV_3:%.*]] = extractvalue { i32, i1 } [[UMUL_3]], 1 +; CHECK-NEXT: call void @use.i32(i32 [[R_3]], i1 [[OV_3]]) +; CHECK-NEXT: ret void +; + %umul.1 = call {i32, i1} @llvm.umul.with.overflow(i32 %x, i32 1) + %r.1 = extractvalue {i32, i1} %umul.1, 0 + %ov.1 = extractvalue {i32, i1} %umul.1, 1 + call void @use.i32(i32 %r.1, i1 %ov.1) + + %umul.2 = call {i32, i1} @llvm.umul.with.overflow(i32 1, i32 %x) + %r.2 = extractvalue {i32, i1} 
%umul.2, 0 + %ov.2 = extractvalue {i32, i1} %umul.2, 1 + call void @use.i32(i32 %r.2, i1 %ov.2) + + %umul.3 = call {i32, i1} @llvm.umul.with.overflow(i32 2, i32 %x) + %r.3 = extractvalue {i32, i1} %umul.3, 0 + %ov.3 = extractvalue {i32, i1} %umul.3, 1 + call void @use.i32(i32 %r.3, i1 %ov.3) + + ret void +} + +declare void @use.4xi32(<4 x i32>, <4 x i1>) + +define void @umul_extractvalue_vec(ptr %P, <4 x i32> %x) { +; CHECK-LABEL: @umul_extractvalue_vec( +; CHECK-NEXT: [[UMUL_1:%.*]] = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> [[X:%.*]], <4 x i32> splat (i32 1)) +; CHECK-NEXT: [[R_1:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_1]], 0 +; CHECK-NEXT: [[OV_1:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_1]], 1 +; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[R_1]], <4 x i1> [[OV_1]]) +; CHECK-NEXT: [[UMUL_2:%.*]] = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> splat (i32 1), <4 x i32> [[X]]) +; CHECK-NEXT: [[R_2:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_2]], 0 +; CHECK-NEXT: [[OV_2:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_2]], 1 +; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[R_2]], <4 x i1> [[OV_2]]) +; CHECK-NEXT: [[UMUL_3:%.*]] = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> splat (i32 2), <4 x i32> [[X]]) +; CHECK-NEXT: [[R_3:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_3]], 0 +; CHECK-NEXT: [[OV_3:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_3]], 1 +; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[R_3]], <4 x i1> [[OV_3]]) +; CHECK-NEXT: ret void +; + %umul.1 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %x, <4 x i32> splat (i32 1)) + %r.1 = extractvalue {<4 x i32>, <4 x i1>} %umul.1, 0 + %ov.1 = extractvalue {<4 x i32>, <4 x i1>} %umul.1, 1 + call void @use.4xi32(<4 x i32> %r.1, <4 x i1> %ov.1) + + %umul.2 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> splat (i32 1), <4 x i32> %x) + %r.2 = extractvalue {<4 x i32>, <4 x 
i1>} %umul.2, 0 + %ov.2 = extractvalue {<4 x i32>, <4 x i1>} %umul.2, 1 + call void @use.4xi32(<4 x i32> %r.2, <4 x i1> %ov.2) + + %umul.3 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> splat (i32 2), <4 x i32> %x) + %r.3 = extractvalue {<4 x i32>, <4 x i1>} %umul.3, 0 + %ov.3 = extractvalue {<4 x i32>, <4 x i1>} %umul.3, 1 + call void @use.4xi32(<4 x i32> %r.3, <4 x i1> %ov.3) + + ret void +} From 036f549cd02d3713ee03b46b33337c366a024227 Mon Sep 17 00:00:00 2001 From: Florian Hahn Date: Sat, 6 Sep 2025 21:00:16 +0100 Subject: [PATCH 2/3] [InstSimplify] Simplify extractvalue (umul_with_overflow(x, 1)). Look through extractvalue to simplify umul_with_overflow where one of the operands is 1. This removes some redundant instructions when expanding SCEVs, which in turn makes the runtime check cost estimate more accurate, reducing the minimum iterations for which vectorization is profitable. --- llvm/lib/Analysis/InstructionSimplify.cpp | 13 ++++++++ .../InstSimplify/fold-intrinsics.ll | 8 ----- .../PowerPC/optimal-epilog-vectorization.ll | 14 +++----- .../RISCV/riscv-vector-reverse.ll | 32 ++++++------------- .../Transforms/LoopVectorize/X86/pr35432.ll | 9 ++---- .../optimal-epilog-vectorization.ll | 7 ++-- llvm/test/Transforms/LoopVectorize/pr37248.ll | 14 +++----- .../LoopVectorize/reverse_induction.ll | 7 ++-- 8 files changed, 38 insertions(+), 66 deletions(-) diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index 5907e21065331..33a925d885411 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -5242,6 +5242,19 @@ static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, } } + // Simplify umul_with_overflow where one operand is 1. 
+ Value *V; + if (Idxs.size() == 1 && + (match(Agg, + m_Intrinsic<Intrinsic::umul_with_overflow>(m_Value(V), m_One())) || + match(Agg, m_Intrinsic<Intrinsic::umul_with_overflow>(m_One(), + m_Value(V))))) { + if (Idxs[0] == 0) + return V; + assert(Idxs[0] == 1 && "invalid index"); + return getFalse(IntegerType::get(V->getContext(), 1)); + } + return nullptr; } diff --git a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll index f5689b7fd6d2c..2faefd0edb697 100644 --- a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll +++ b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll @@ -615,14 +615,6 @@ declare void @use.i32(i32, i1) define void @umul_extractvalue(ptr %P, i32 %x) { ; CHECK-LABEL: @umul_extractvalue( -; CHECK-NEXT: [[UMUL_1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 1) -; CHECK-NEXT: [[R_1:%.*]] = extractvalue { i32, i1 } [[UMUL_1]], 0 -; CHECK-NEXT: [[OV_1:%.*]] = extractvalue { i32, i1 } [[UMUL_1]], 1 -; CHECK-NEXT: call void @use.i32(i32 [[R_1]], i1 [[OV_1]]) -; CHECK-NEXT: [[UMUL_2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[X]]) -; CHECK-NEXT: [[R_2:%.*]] = extractvalue { i32, i1 } [[UMUL_2]], 0 -; CHECK-NEXT: [[OV_2:%.*]] = extractvalue { i32, i1 } [[UMUL_2]], 1 -; CHECK-NEXT: call void @use.i32(i32 [[R_2]], i1 [[OV_2]]) ; CHECK-NEXT: [[UMUL_3:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[X]]) ; CHECK-NEXT: [[R_3:%.*]] = extractvalue { i32, i1 } [[UMUL_3]], 0 ; CHECK-NEXT: [[OV_3:%.*]] = extractvalue { i32, i1 } [[UMUL_3]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll index 64b0a745c169e..597339b906e0b 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll @@ -245,13 +245,10 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; 
VF-TWO-CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1 ; VF-TWO-CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; VF-TWO-CHECK-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP2]]) -; VF-TWO-CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; VF-TWO-CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; VF-TWO-CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP0]], [[MUL_RESULT]] +; VF-TWO-CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP0]], [[TMP2]] ; VF-TWO-CHECK-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]] -; VF-TWO-CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]] ; VF-TWO-CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP1]], 4294967295 -; VF-TWO-CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] +; VF-TWO-CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP4]], [[TMP6]] ; VF-TWO-CHECK-NEXT: br i1 [[TMP7]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] ; VF-TWO-CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; VF-TWO-CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 32 @@ -373,13 +370,10 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1 ; VF-FOUR-CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; VF-FOUR-CHECK-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP2]]) -; VF-FOUR-CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; VF-FOUR-CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; VF-FOUR-CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP0]], [[MUL_RESULT]] +; VF-FOUR-CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP0]], [[TMP2]] ; VF-FOUR-CHECK-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]] -; VF-FOUR-CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]] ; VF-FOUR-CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP1]], 4294967295 -; VF-FOUR-CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] +; VF-FOUR-CHECK-NEXT: [[TMP7:%.*]] = 
or i1 [[TMP4]], [[TMP6]] ; VF-FOUR-CHECK-NEXT: br i1 [[TMP7]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] ; VF-FOUR-CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; VF-FOUR-CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 32 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index 8272e6d82295c..1c7851577d4e6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -194,14 +194,11 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1 ; RV64-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32 ; RV64-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]]) -; RV64-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; RV64-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]] +; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]] ; RV64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]] -; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] ; RV64-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295 -; RV64-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]] -; RV64-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]] +; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[TMP9]] +; RV64-NEXT: br i1 [[TMP8]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]] ; RV64: [[VECTOR_MEMCHECK]]: ; RV64-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; RV64-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 @@ -334,13 +331,10 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1 ; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32 ; RV64-UF2-NEXT: [[MUL:%.*]] 
= call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]]) -; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]] +; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]] ; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]] -; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] ; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295 -; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]] +; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP7]], [[TMP9]] ; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]] ; RV64-UF2: [[VECTOR_MEMCHECK]]: ; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() @@ -455,14 +449,11 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1 ; RV64-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32 ; RV64-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]]) -; RV64-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; RV64-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]] +; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]] ; RV64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]] -; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] ; RV64-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295 -; RV64-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]] -; RV64-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]] +; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[TMP9]] +; RV64-NEXT: br i1 [[TMP8]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]] ; RV64: [[VECTOR_MEMCHECK]]: ; RV64-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() ; RV64-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 
@@ -595,13 +586,10 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur ; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1 ; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32 ; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]]) -; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]] +; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[TMP5]] ; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]] -; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] ; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295 -; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]] +; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP7]], [[TMP9]] ; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]] ; RV64-UF2: [[VECTOR_MEMCHECK]]: ; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll index 6fc7ed2efe852..f7dfb1f4490c1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll @@ -38,7 +38,7 @@ define i32 @main(ptr %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], 1 ; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], [[UMIN1]] -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP4]], 40 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP4]], 36 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: ; CHECK-NEXT: [[TMP5:%.*]] = add i8 [[CONV3]], -1 @@ -47,13 +47,10 @@ define i32 @main(ptr %ptr) { ; CHECK-NEXT: [[TMP7:%.*]] = sub i32 [[TMP6]], [[UMIN]] ; 
CHECK-NEXT: [[TMP8:%.*]] = trunc i32 [[TMP7]] to i8 ; CHECK-NEXT: [[MUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 [[TMP8]]) -; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL]], 0 -; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i8, i1 } [[MUL]], 1 -; CHECK-NEXT: [[TMP9:%.*]] = sub i8 [[TMP5]], [[MUL_RESULT]] +; CHECK-NEXT: [[TMP9:%.*]] = sub i8 [[TMP5]], [[TMP8]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i8 [[TMP9]], [[TMP5]] -; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP10]], [[MUL_OVERFLOW]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i32 [[TMP7]], 255 -; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP10]], [[TMP12]] ; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[DOTPROMOTED]], 1 ; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[TMP14]], [[TMP7]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp slt i32 [[TMP15]], [[TMP14]] diff --git a/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll index fd39cabd5fc69..b4bb53f01e3ae 100644 --- a/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll @@ -156,13 +156,10 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[WIDE_TRIP_COUNT]], -1 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; CHECK-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP2]]) -; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 -; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP0]], [[MUL_RESULT]] +; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp ugt i64 [[TMP1]], 4294967295 -; 
CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP4]], [[TMP6]] ; CHECK-NEXT: br i1 [[TMP7]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] ; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/pr37248.ll b/llvm/test/Transforms/LoopVectorize/pr37248.ll index c78b14f787131..a588008a1d44d 100644 --- a/llvm/test/Transforms/LoopVectorize/pr37248.ll +++ b/llvm/test/Transforms/LoopVectorize/pr37248.ll @@ -27,13 +27,10 @@ define void @f1(ptr noalias %b, i1 %c, i32 %start) { ; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[START]] to i16 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP2]] to i16 ; CHECK-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 1, i16 [[TMP4]]) -; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0 -; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1 -; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[TMP3]], [[MUL_RESULT]] +; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[TMP3]], [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt i16 [[TMP5]], [[TMP3]] -; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i32 [[TMP2]], 65535 -; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]] +; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP6]], [[TMP8]] ; CHECK-NEXT: br i1 [[TMP9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 @@ -109,13 +106,10 @@ define void @f2(ptr noalias %b, i1 %c, i32 %start) { ; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[START]] to i16 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP2]] to i16 ; CHECK-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 1, i16 [[TMP4]]) -; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0 -; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 
1 -; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[TMP3]], [[MUL_RESULT]] +; CHECK-NEXT: [[TMP5:%.*]] = sub i16 [[TMP3]], [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp sgt i16 [[TMP5]], [[TMP3]] -; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i32 [[TMP2]], 65535 -; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]] +; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP6]], [[TMP8]] ; CHECK-NEXT: br i1 [[TMP9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll index 010ce6e8433ae..d24a5f567e3b9 100644 --- a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll +++ b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll @@ -150,12 +150,9 @@ define i32 @reverse_induction_i16(i16 %startval, ptr %ptr) { ; CHECK: [[VECTOR_SCEVCHECK]]: ; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[STARTVAL]], -1 ; CHECK-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 1, i16 1023) -; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0 -; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1 -; CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[TMP0]], [[MUL_RESULT]] +; CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[TMP0]], 1023 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i16 [[TMP1]], [[TMP0]] -; CHECK-NEXT: [[TMP3:%.*]] = or i1 [[TMP2]], [[MUL_OVERFLOW]] -; CHECK-NEXT: br i1 [[TMP3]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[TMP2]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: From a3194af7ca500085c670052d785cf04c14cffab8 Mon Sep 17 00:00:00 2001 From: Florian Hahn Date: Sun, 7 Sep 2025 11:00:10 +0100 Subject: [PATCH 3/3] !fixup handle creating vector false constant --- llvm/lib/Analysis/InstructionSimplify.cpp | 2 +- 
llvm/test/Transforms/InstSimplify/fold-intrinsics.ll | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index 33a925d885411..ebe329aa1d5fe 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -5252,7 +5252,7 @@ static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, if (Idxs[0] == 0) return V; assert(Idxs[0] == 1 && "invalid index"); - return getFalse(IntegerType::get(V->getContext(), 1)); + return getFalse(CmpInst::makeCmpResultType(V->getType())); } return nullptr; diff --git a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll index 2faefd0edb697..ef97d42834ac8 100644 --- a/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll +++ b/llvm/test/Transforms/InstSimplify/fold-intrinsics.ll @@ -615,6 +615,8 @@ declare void @use.i32(i32, i1) define void @umul_extractvalue(ptr %P, i32 %x) { ; CHECK-LABEL: @umul_extractvalue( +; CHECK-NEXT: call void @use.i32(i32 [[X:%.*]], i1 false) +; CHECK-NEXT: call void @use.i32(i32 [[X]], i1 false) ; CHECK-NEXT: [[UMUL_3:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[X]]) ; CHECK-NEXT: [[R_3:%.*]] = extractvalue { i32, i1 } [[UMUL_3]], 0 ; CHECK-NEXT: [[OV_3:%.*]] = extractvalue { i32, i1 } [[UMUL_3]], 1 @@ -643,14 +645,8 @@ declare void @use.4xi32(<4 x i32>, <4 x i1>) define void @umul_extractvalue_vec(ptr %P, <4 x i32> %x) { ; CHECK-LABEL: @umul_extractvalue_vec( -; CHECK-NEXT: [[UMUL_1:%.*]] = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> [[X:%.*]], <4 x i32> splat (i32 1)) -; CHECK-NEXT: [[R_1:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_1]], 0 -; CHECK-NEXT: [[OV_1:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_1]], 1 -; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[R_1]], <4 x i1> [[OV_1]]) -; CHECK-NEXT: [[UMUL_2:%.*]] = call { <4 x i32>, <4 
x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> splat (i32 1), <4 x i32> [[X]]) -; CHECK-NEXT: [[R_2:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_2]], 0 -; CHECK-NEXT: [[OV_2:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_2]], 1 -; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[R_2]], <4 x i1> [[OV_2]]) +; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[X:%.*]], <4 x i1> zeroinitializer) +; CHECK-NEXT: call void @use.4xi32(<4 x i32> [[X]], <4 x i1> zeroinitializer) ; CHECK-NEXT: [[UMUL_3:%.*]] = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> splat (i32 2), <4 x i32> [[X]]) ; CHECK-NEXT: [[R_3:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_3]], 0 ; CHECK-NEXT: [[OV_3:%.*]] = extractvalue { <4 x i32>, <4 x i1> } [[UMUL_3]], 1