diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 21bfc91148bfe..9f220ec003ec3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1284,7 +1284,11 @@ Instruction *InstCombinerImpl::foldSelectValueEquivalence(SelectInst &Sel,
       isGuaranteedNotToBeUndefOrPoison(CmpRHS, SQ.AC, &Sel, &DT)) {
     if (Value *V = simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, SQ,
                                           /* AllowRefinement */ true))
-      return replaceOperand(Sel, Swapped ? 2 : 1, V);
+      // Require either the replacement or the simplification result to be a
+      // constant to avoid infinite loops.
+      // FIXME: Make this check more precise.
+      if (isa<Constant>(CmpRHS) || isa<Constant>(V))
+        return replaceOperand(Sel, Swapped ? 2 : 1, V);
 
     // Even if TrueVal does not simplify, we can directly replace a use of
     // CmpLHS with CmpRHS, as long as the instruction is not used anywhere
@@ -1302,7 +1306,8 @@ Instruction *InstCombinerImpl::foldSelectValueEquivalence(SelectInst &Sel,
       isGuaranteedNotToBeUndefOrPoison(CmpLHS, SQ.AC, &Sel, &DT))
     if (Value *V = simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, SQ,
                                           /* AllowRefinement */ true))
-      return replaceOperand(Sel, Swapped ? 2 : 1, V);
+      if (isa<Constant>(CmpLHS) || isa<Constant>(V))
+        return replaceOperand(Sel, Swapped ? 2 : 1, V);
 
   auto *FalseInst = dyn_cast<Instruction>(FalseVal);
   if (!FalseInst)
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index c5f1b77c6d740..b7e743c14a52c 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -2849,12 +2849,14 @@ define i8 @select_replacement_sub(i8 %x, i8 %y, i8 %z) {
   ret i8 %sel
 }
 
+; FIXME: This is safe to fold.
 define i8 @select_replacement_shift_noundef(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @select_replacement_shift_noundef(
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr exact i8 [[X:%.*]], 1
 ; CHECK-NEXT:    call void @use_i8(i8 noundef [[SHR]])
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[SHR]], [[Y:%.*]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i8 [[X]], i8 [[Z:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i8 [[Y]], 1
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i8 [[SHL]], i8 [[Z:%.*]]
 ; CHECK-NEXT:    ret i8 [[SEL]]
 ;
   %shr = lshr exact i8 %x, 1
@@ -2904,6 +2906,40 @@ define i32 @select_replacement_loop2(i32 %arg, i32 %arg2) {
   ret i32 %sel
 }
 
+define i8 @select_replacement_loop3(i32 noundef %x) {
+; CHECK-LABEL: @select_replacement_loop3(
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[X:%.*]] to i8
+; CHECK-NEXT:    [[REV:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[TRUNC]])
+; CHECK-NEXT:    [[EXT:%.*]] = zext i8 [[REV]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[EXT]], [[X]]
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i8 [[TRUNC]], i8 0
+; CHECK-NEXT:    ret i8 [[SEL]]
+;
+  %trunc = trunc i32 %x to i8
+  %rev = call i8 @llvm.bitreverse.i8(i8 %trunc)
+  %ext = zext i8 %rev to i32
+  %cmp = icmp eq i32 %ext, %x
+  %sel = select i1 %cmp, i8 %trunc, i8 0
+  ret i8 %sel
+}
+
+define i16 @select_replacement_loop4(i16 noundef %p_12) {
+; CHECK-LABEL: @select_replacement_loop4(
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp ult i16 [[P_12:%.*]], 2
+; CHECK-NEXT:    [[AND1:%.*]] = and i16 [[P_12]], 1
+; CHECK-NEXT:    [[AND2:%.*]] = select i1 [[CMP1]], i16 [[AND1]], i16 0
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i16 [[AND2]], [[P_12]]
+; CHECK-NEXT:    [[AND3:%.*]] = select i1 [[CMP2]], i16 [[AND1]], i16 0
+; CHECK-NEXT:    ret i16 [[AND3]]
+;
+  %cmp1 = icmp ult i16 %p_12, 2
+  %and1 = and i16 %p_12, 1
+  %and2 = select i1 %cmp1, i16 %and1, i16 0
+  %cmp2 = icmp eq i16 %and2, %p_12
+  %and3 = select i1 %cmp2, i16 %and1, i16 0
+  ret i16 %and3
+}
+
 define ptr @select_replacement_gep_inbounds(ptr %base, i64 %offset) {
 ; CHECK-LABEL: @select_replacement_gep_inbounds(
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[BASE:%.*]], i64 [[OFFSET:%.*]]