diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index c59cf5a2a86ef..7a59d5d61170e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1221,9 +1221,18 @@ Instruction *InstCombinerImpl::visitZExt(ZExtInst &Zext) {
     }
   }
 
-  if (!Zext.hasNonNeg() && isKnownNonNegative(Src, DL, 0, &AC, &Zext, &DT)) {
-    Zext.setNonNeg();
-    return &Zext;
+  if (!Zext.hasNonNeg()) {
+    // If this zero extend is only used by a shift, add nneg flag.
+    if (Zext.hasOneUse() && SrcTy->getScalarSizeInBits() > 2 &&
+        match(Zext.user_back(), m_Shift(m_Value(), m_Specific(&Zext)))) {
+      Zext.setNonNeg();
+      return &Zext;
+    }
+
+    if (isKnownNonNegative(Src, DL, 0, &AC, &Zext, &DT)) {
+      Zext.setNonNeg();
+      return &Zext;
+    }
   }
 
   return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/div-shift.ll b/llvm/test/Transforms/InstCombine/div-shift.ll
index d208837f04594..9610746811a43 100644
--- a/llvm/test/Transforms/InstCombine/div-shift.ll
+++ b/llvm/test/Transforms/InstCombine/div-shift.ll
@@ -38,7 +38,7 @@ define <2 x i32> @t1vec(<2 x i16> %x, <2 x i32> %y) {
 ; rdar://11721329
 define i64 @t2(i64 %x, i32 %y) {
 ; CHECK-LABEL: @t2(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[Y:%.*]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = lshr i64 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
@@ -52,7 +52,7 @@ define i64 @t2(i64 %x, i32 %y) {
 define i64 @t3(i64 %x, i32 %y) {
 ; CHECK-LABEL: @t3(
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[Y:%.*]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[X:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index 56f6e042b3caf..e941284a798ed 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -122,7 +122,7 @@ define i1 @test4(i32 %X) {
 
 define i1 @test4_i16(i16 %X) {
 ; CHECK-LABEL: @test4_i16(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i16 [[X:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 933, [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], 1
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[TMP3]], 0
diff --git a/llvm/test/Transforms/InstCombine/rem.ll b/llvm/test/Transforms/InstCombine/rem.ll
index 364d51f343fb3..86ef4f45f7da4 100644
--- a/llvm/test/Transforms/InstCombine/rem.ll
+++ b/llvm/test/Transforms/InstCombine/rem.ll
@@ -249,7 +249,7 @@ define i32 @test4(i32 %X, i1 %C) {
 
 define i32 @test5(i32 %X, i8 %B) {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[SHIFT_UPGRD_1:%.*]] = zext i8 [[B:%.*]] to i32
+; CHECK-NEXT:    [[SHIFT_UPGRD_1:%.*]] = zext nneg i8 [[B:%.*]] to i32
 ; CHECK-NEXT:    [[AMT:%.*]] = shl nuw i32 32, [[SHIFT_UPGRD_1]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[AMT]], -1
 ; CHECK-NEXT:    [[V:%.*]] = and i32 [[TMP1]], [[X:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
index 6910aa5b57a29..624f06271dcd8 100644
--- a/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
+++ b/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
@@ -73,7 +73,7 @@ define i64 @sel_false_val_is_a_masked_ashr_of_true_val1(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_ashr_of_true_val1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -90,7 +90,7 @@ define i64 @sel_false_val_is_a_masked_ashr_of_true_val2(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_ashr_of_true_val2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll b/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll
index 7c70178e1bd51..4bc94d26e8758 100644
--- a/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll
+++ b/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll
@@ -73,7 +73,7 @@ define i64 @test_shl_nuw_nsw__nuw_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw_nsw__nuw_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -90,7 +90,7 @@ define i64 @test_shl_nuw__nuw_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw__nuw_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -107,7 +107,7 @@ define i64 @test_shl_nsw__nuw_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nsw__nuw_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -124,7 +124,7 @@ define i64 @test_shl__nuw_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl__nuw_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -202,7 +202,7 @@ define i64 @test_shl_nuw_nsw__none_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw_nsw__none_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -219,7 +219,7 @@ define i64 @test_shl_nuw__none_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw__none_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -236,7 +236,7 @@ define i64 @test_shl_nsw__none_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nsw__none_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -253,7 +253,7 @@ define i64 @test_shl__none_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl__none_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -338,7 +338,7 @@ define i64 @test_ashr_exact__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr_exact__exact_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -355,7 +355,7 @@ define i64 @test_ashr__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr__exact_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -372,7 +372,7 @@ define i64 @test_ashr_exact__exact_is_unsafe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr_exact__exact_is_unsafe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -389,7 +389,7 @@ define i64 @test_ashr__exact_is_unsafe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr__exact_is_unsafe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-add-inseltpoison.ll b/llvm/test/Transforms/InstCombine/shift-add-inseltpoison.ll
index 0fca3dd504bcd..b26fbad7f973a 100644
--- a/llvm/test/Transforms/InstCombine/shift-add-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add-inseltpoison.ll
@@ -5,7 +5,7 @@
 
 define i32 @shl_C1_add_A_C2_i32(i16 %A) {
 ; CHECK-LABEL: @shl_C1_add_A_C2_i32(
-; CHECK-NEXT:    [[B:%.*]] = zext i16 [[A:%.*]] to i32
+; CHECK-NEXT:    [[B:%.*]] = zext nneg i16 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[D:%.*]] = shl i32 192, [[B]]
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
@@ -39,7 +39,7 @@ define i32 @lshr_C1_add_A_C2_i32(i32 %A) {
 
 define <4 x i32> @shl_C1_add_A_C2_v4i32(<4 x i16> %A) {
 ; CHECK-LABEL: @shl_C1_add_A_C2_v4i32(
-; CHECK-NEXT:    [[B:%.*]] = zext <4 x i16> [[A:%.*]] to <4 x i32>
+; CHECK-NEXT:    [[B:%.*]] = zext nneg <4 x i16> [[A:%.*]] to <4 x i32>
 ; CHECK-NEXT:    [[D:%.*]] = shl <4 x i32> , [[B]]
 ; CHECK-NEXT:    ret <4 x i32> [[D]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index 0301b93063385..006d303cb56d3 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -7,7 +7,7 @@ declare void @use(i8)
 
 define i32 @shl_C1_add_A_C2_i32(i16 %A) {
 ; CHECK-LABEL: @shl_C1_add_A_C2_i32(
-; CHECK-NEXT:    [[B:%.*]] = zext i16 [[A:%.*]] to i32
+; CHECK-NEXT:    [[B:%.*]] = zext nneg i16 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[D:%.*]] = shl i32 192, [[B]]
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
@@ -41,7 +41,7 @@ define i32 @lshr_C1_add_A_C2_i32(i32 %A) {
 
 define <4 x i32> @shl_C1_add_A_C2_v4i32(<4 x i16> %A) {
 ; CHECK-LABEL: @shl_C1_add_A_C2_v4i32(
-; CHECK-NEXT:    [[B:%.*]] = zext <4 x i16> [[A:%.*]] to <4 x i32>
+; CHECK-NEXT:    [[B:%.*]] = zext nneg <4 x i16> [[A:%.*]] to <4 x i32>
 ; CHECK-NEXT:    [[D:%.*]] = shl <4 x i32> , [[B]]
 ; CHECK-NEXT:    ret <4 x i32> [[D]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
index d48e4ab0d2342..60a7dce2a8753 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
@@ -19,7 +19,7 @@ define i1 @n0(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl i32 [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -16
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
@@ -79,7 +79,7 @@ define i1 @n2(i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl i32 131071, [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -16
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
@@ -137,7 +137,7 @@ define i1 @n4(i32 %x, i32 %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl i32 [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -16
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i64 262143, [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
@@ -186,7 +186,7 @@ define <2 x i1> @n6_vec(<2 x i64> %y, <2 x i32> %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub <2 x i32> , [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl <2 x i32> , [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <2 x i32> [[LEN]],
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext <2 x i32> [[T2]] to <2 x i64>
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg <2 x i32> [[T2]] to <2 x i64>
 ; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i64> [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc <2 x i64> [[T3]] to <2 x i32>
 ; CHECK-NEXT:    [[T4:%.*]] = and <2 x i32> [[T1]], [[T3_TRUNC]]
@@ -227,7 +227,7 @@ define <2 x i1> @n8_vec(<2 x i32> %x, <2 x i32> %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub <2 x i32> , [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl <2 x i32> [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <2 x i32> [[LEN]],
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext <2 x i32> [[T2]] to <2 x i64>
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg <2 x i32> [[T2]] to <2 x i64>
 ; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i64> , [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc <2 x i64> [[T3]] to <2 x i32>
 ; CHECK-NEXT:    [[T4:%.*]] = and <2 x i32> [[T1]], [[T3_TRUNC]]
@@ -272,7 +272,7 @@ define i1 @t10_almost_highest_bit(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 64, [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl i32 [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -2
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
@@ -314,7 +314,7 @@ define i1 @t10_shift_by_one(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 64, [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl i32 [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -63
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
@@ -338,7 +338,7 @@ define <2 x i1> @t11_zero_and_almost_bitwidth(<2 x i32> %x, <2 x i64> %y, <2 x i
 ; CHECK-NEXT:    [[T0:%.*]] = sub <2 x i32> , [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl <2 x i32> [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <2 x i32> [[LEN]],
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext <2 x i32> [[T2]] to <2 x i64>
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg <2 x i32> [[T2]] to <2 x i64>
 ; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i64> [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc <2 x i64> [[T3]] to <2 x i32>
 ; CHECK-NEXT:    [[T4:%.*]] = and <2 x i32> [[T1]], [[T3_TRUNC]]
@@ -360,7 +360,7 @@ define <2 x i1> @n12_bad(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub <2 x i32> , [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = shl <2 x i32> [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <2 x i32> [[LEN]],
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext <2 x i32> [[T2]] to <2 x i64>
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg <2 x i32> [[T2]] to <2 x i64>
 ; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i64> [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc <2 x i64> [[T3]] to <2 x i32>
 ; CHECK-NEXT:    [[T4:%.*]] = and <2 x i32> [[T1]], [[T3_TRUNC]]
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
index e49fea4936217..3a85f19d8a037 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
@@ -400,7 +400,7 @@ define i1 @n13_overshift(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], 32
-; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
+; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
@@ -421,7 +421,7 @@ define i1 @n13_overshift(i32 %x, i64 %y, i32 %len) {
 define i1 @n14_trunc_of_lshr(i64 %x, i32 %y, i32 %len) {
 ; CHECK-LABEL: @n14_trunc_of_lshr(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
-; CHECK-NEXT:    [[T0_WIDE:%.*]] = zext i32 [[T0]] to i64
+; CHECK-NEXT:    [[T0_WIDE:%.*]] = zext nneg i32 [[T0]] to i64
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i64 [[X:%.*]], [[T0_WIDE]]
 ; CHECK-NEXT:    [[T1_TRUNC:%.*]] = trunc i64 [[T1]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -1
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
index 6c0c6ee0f19db..6773cbac1d1e8 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
@@ -95,7 +95,7 @@ declare void @use32(i32)
 define i16 @t6_extrause0(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t6_extrause0(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = ashr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    call void @use16(i16 [[T3]])
@@ -134,7 +134,7 @@ define i16 @t7_extrause1(i32 %x, i16 %y) {
 define i16 @t8_extrause2(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t8_extrause2(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = ashr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -1
@@ -163,7 +163,7 @@ define i16 @t8_extrause2(i32 %x, i16 %y) {
 define i16 @t9_ashr(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t9_ashr(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = ashr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -2
@@ -183,7 +183,7 @@ define i16 @t9_ashr(i32 %x, i16 %y) {
 define i16 @n10_lshr_ashr(i32 %x, i16 %y) {
 ; CHECK-LABEL: @n10_lshr_ashr(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = lshr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -1
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
index c257f817684cb..63099a8af81f6 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
@@ -95,7 +95,7 @@ declare void @use32(i32)
 define i16 @t6_extrause0(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t6_extrause0(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = lshr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    call void @use16(i16 [[T3]])
@@ -134,7 +134,7 @@ define i16 @t7_extrause1(i32 %x, i16 %y) {
 define i16 @t8_extrause2(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t8_extrause2(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = lshr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -1
@@ -163,7 +163,7 @@ define i16 @t8_extrause2(i32 %x, i16 %y) {
 define i16 @t9_lshr(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t9_lshr(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = lshr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -2
@@ -183,7 +183,7 @@ define i16 @t9_lshr(i32 %x, i16 %y) {
 define i16 @n10_ashr_lshr(i32 %x, i16 %y) {
 ; CHECK-LABEL: @n10_ashr_lshr(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = ashr i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -1
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-shl.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-shl.ll
index dd70169c7f17f..073013b34a3ba 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-shl.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-shl.ll
@@ -112,7 +112,7 @@ declare void @use32(i32)
 define i16 @t6_extrause0(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t6_extrause0(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    call void @use16(i16 [[T3]])
@@ -151,7 +151,7 @@ define i16 @t7_extrause1(i32 %x, i16 %y) {
 define i16 @t8_extrause2(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t8_extrause2(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 32, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -24
@@ -180,7 +180,7 @@ define i16 @t8_extrause2(i32 %x, i16 %y) {
 define i16 @n11(i32 %x, i16 %y) {
 ; CHECK-LABEL: @n11(
 ; CHECK-NEXT:    [[T0:%.*]] = sub i16 30, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = zext i16 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = zext nneg i16 [[T0]] to i32
 ; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[X:%.*]], [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y]], -31
diff --git a/llvm/test/Transforms/InstCombine/shift-by-signext.ll b/llvm/test/Transforms/InstCombine/shift-by-signext.ll
index b72f33fc65023..7fe4364cc0801 100644
--- a/llvm/test/Transforms/InstCombine/shift-by-signext.ll
+++ b/llvm/test/Transforms/InstCombine/shift-by-signext.ll
@@ -6,7 +6,7 @@
 
 define i32 @t0_shl(i32 %x, i8 %shamt) {
 ; CHECK-LABEL: @t0_shl(
-; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext i8 [[SHAMT:%.*]] to i32
+; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext nneg i8 [[SHAMT:%.*]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = shl i32 [[X:%.*]], [[SHAMT_WIDE1]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
@@ -16,7 +16,7 @@ define i32 @t0_shl(i32 %x, i8 %shamt) {
 }
 define i32 @t1_lshr(i32 %x, i8 %shamt) {
 ; CHECK-LABEL: @t1_lshr(
-; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext i8 [[SHAMT:%.*]] to i32
+; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext nneg i8 [[SHAMT:%.*]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = lshr i32 [[X:%.*]], [[SHAMT_WIDE1]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
@@ -26,7 +26,7 @@ define i32 @t1_lshr(i32 %x, i8 %shamt) {
 }
 define i32 @t2_ashr(i32 %x, i8 %shamt) {
 ; CHECK-LABEL: @t2_ashr(
-; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext i8 [[SHAMT:%.*]] to i32
+; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext nneg i8 [[SHAMT:%.*]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = ashr i32 [[X:%.*]], [[SHAMT_WIDE1]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
@@ -37,7 +37,7 @@ define i32 @t2_ashr(i32 %x, i8 %shamt) {
 
 define <2 x i32> @t3_vec_shl(<2 x i32> %x, <2 x i8> %shamt) {
 ; CHECK-LABEL: @t3_vec_shl(
-; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext <2 x i8> [[SHAMT:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext nneg <2 x i8> [[SHAMT:%.*]] to <2 x i32>
 ; CHECK-NEXT:    [[R:%.*]] = shl <2 x i32> [[X:%.*]], [[SHAMT_WIDE1]]
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
@@ -47,7 +47,7 @@ define <2 x i32> @t3_vec_shl(<2 x i32> %x, <2 x i8> %shamt) {
 }
 define <2 x i32> @t4_vec_lshr(<2 x i32> %x, <2 x i8> %shamt) {
 ; CHECK-LABEL: @t4_vec_lshr(
-; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext <2 x i8> [[SHAMT:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext nneg <2 x i8> [[SHAMT:%.*]] to <2 x i32>
 ; CHECK-NEXT:    [[R:%.*]] = lshr <2 x i32> [[X:%.*]], [[SHAMT_WIDE1]]
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
@@ -57,7 +57,7 @@ define <2 x i32> @t4_vec_lshr(<2 x i32> %x, <2 x i8> %shamt) {
 }
 define <2 x i32> @t5_vec_ashr(<2 x i32> %x, <2 x i8> %shamt) {
 ; CHECK-LABEL: @t5_vec_ashr(
-; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext <2 x i8> [[SHAMT:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[SHAMT_WIDE1:%.*]] = zext nneg <2 x i8> [[SHAMT:%.*]] to <2 x i32>
 ; CHECK-NEXT:    [[R:%.*]] = ashr <2 x i32> [[X:%.*]], [[SHAMT_WIDE1]]
 ; CHECK-NEXT:    ret <2 x i32> [[R]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-sra.ll b/llvm/test/Transforms/InstCombine/shift-sra.ll
index 2e32689e47108..82bce5f02554e 100644
--- a/llvm/test/Transforms/InstCombine/shift-sra.ll
+++ b/llvm/test/Transforms/InstCombine/shift-sra.ll
@@ -4,7 +4,7 @@
 
 define i32 @test1(i32 %X, i8 %A) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT:    [[SHIFT_UPGRD_1:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[SHIFT_UPGRD_1:%.*]] = zext nneg i8 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[Y1:%.*]] = lshr i32 [[X:%.*]], [[SHIFT_UPGRD_1]]
 ; CHECK-NEXT:    [[Z:%.*]] = and i32 [[Y1]], 1
 ; CHECK-NEXT:    ret i32 [[Z]]
diff --git a/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll b/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll
index 358e86a12b267..b0a434e55f489 100644
--- a/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll
+++ b/llvm/test/Transforms/InstCombine/variable-signext-of-variable-high-bit-extraction.ll
@@ -188,7 +188,7 @@ define i64 @t3_notrunc_redundant_sext(i64 %data, i64 %nbits) {
 define <2 x i32> @t4_vec(<2 x i64> %data, <2 x i32> %nbits) {
 ; CHECK-LABEL: @t4_vec(
 ; CHECK-NEXT:    [[SKIP_HIGH:%.*]] = sub <2 x i32> , [[NBITS:%.*]]
-; CHECK-NEXT:    [[SKIP_HIGH_WIDE:%.*]] = zext <2 x i32> [[SKIP_HIGH]] to <2 x i64>
+; CHECK-NEXT:    [[SKIP_HIGH_WIDE:%.*]] = zext nneg <2 x i32> [[SKIP_HIGH]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <2 x i64> [[DATA:%.*]], [[SKIP_HIGH_WIDE]]
 ; CHECK-NEXT:    [[SIGNEXTENDED:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[SIGNEXTENDED]]
@@ -206,7 +206,7 @@ define <2 x i32> @t4_vec(<2 x i64> %data, <2 x i32> %nbits) {
 define <3 x i32> @t5_vec_undef(<3 x i64> %data, <3 x i32> %nbits) {
 ; CHECK-LABEL: @t5_vec_undef(
 ; CHECK-NEXT:    [[SKIP_HIGH:%.*]] = sub <3 x i32> , [[NBITS:%.*]]
-; CHECK-NEXT:    [[SKIP_HIGH_WIDE:%.*]] = zext <3 x i32> [[SKIP_HIGH]] to <3 x i64>
+; CHECK-NEXT:    [[SKIP_HIGH_WIDE:%.*]] = zext nneg <3 x i32> [[SKIP_HIGH]] to <3 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <3 x i64> [[DATA:%.*]], [[SKIP_HIGH_WIDE]]
 ; CHECK-NEXT:    [[SIGNEXTENDED:%.*]] = trunc <3 x i64> [[TMP1]] to <3 x i32>
 ; CHECK-NEXT:    ret <3 x i32> [[SIGNEXTENDED]]
diff --git a/llvm/test/Transforms/InstCombine/vector-udiv.ll b/llvm/test/Transforms/InstCombine/vector-udiv.ll
index c3468e95ba3d4..c817b3a1ac5a0 100644
--- a/llvm/test/Transforms/InstCombine/vector-udiv.ll
+++ b/llvm/test/Transforms/InstCombine/vector-udiv.ll
@@ -75,7 +75,7 @@ define <4 x i32> @test_v4i32_shl_const_pow2(<4 x i32> %a0, <4 x i32> %a1) {
 define <4 x i32> @test_v4i32_zext_shl_splatconst_pow2(<4 x i32> %a0, <4 x i16> %a1) {
 ; CHECK-LABEL: @test_v4i32_zext_shl_splatconst_pow2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i16> [[A1:%.*]],
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg <4 x i16> [[TMP1]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[A0:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
@@ -88,7 +88,7 @@ define <4 x i32> @test_v4i32_zext_shl_splatconst_pow2(<4 x i32> %a0, <4 x i16> %
 define <4 x i32> @test_v4i32_zext_shl_const_pow2(<4 x i32> %a0, <4 x i16> %a1) {
 ; CHECK-LABEL: @test_v4i32_zext_shl_const_pow2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = add <4 x i16> [[A1:%.*]],
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg <4 x i16> [[TMP1]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i32> [[A0:%.*]], [[TMP2]]
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
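
Illustrative sketch (not part of the patch; the function name below is invented for the example): the new clause in visitZExt sets the nneg flag on a single-use zext whose sole user is a shift that consumes it as the shift amount, which is exactly the shape exercised by @t2 in div-shift.ll above.

  define i64 @shift_amount_from_zext(i64 %x, i32 %y) {
    %amt = zext i32 %y to i64   ; only use of %amt is the shift-amount operand below
    %r = lshr i64 %x, %amt
    ret i64 %r
  }

With this patch applied, instcombine emits the cast as '%amt = zext nneg i32 %y to i64', matching the updated CHECK lines throughout the regression tests.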