diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 2285a91cbdf2b..2127000c4b780 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1372,8 +1372,11 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
   unsigned DestBitSize = DestTy->getScalarSizeInBits();
 
   // If the value being extended is zero or positive, use a zext instead.
-  if (isKnownNonNegative(Src, DL, 0, &AC, &Sext, &DT))
-    return CastInst::Create(Instruction::ZExt, Src, DestTy);
+  if (isKnownNonNegative(Src, DL, 0, &AC, &Sext, &DT)) {
+    auto CI = CastInst::Create(Instruction::ZExt, Src, DestTy);
+    CI->setNonNeg(true);
+    return CI;
+  }
 
   // Try to extend the entire expression tree to the wide destination type.
   if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
diff --git a/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll b/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll
index 67871f3d64c41..dced559445053 100644
--- a/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll
+++ b/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll
@@ -246,7 +246,7 @@ define <2 x i32> @umin4_vec(<2 x i32> %n) {
 define i64 @smax_sext(i32 %a) {
 ; CHECK-LABEL: @smax_sext(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call i32 @llvm.smax.i32(i32 [[A:%.*]], i32 0)
-; CHECK-NEXT:    [[MAX:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[MAX:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[MAX]]
 ;
   %a_ext = sext i32 %a to i64
@@ -258,7 +258,7 @@ define i64 @smax_sext(i32 %a) {
 define <2 x i64> @smax_sext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @smax_sext_vec(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[A:%.*]], <2 x i32> zeroinitializer)
-; CHECK-NEXT:    [[MAX:%.*]] = zext <2 x i32> [[NARROW]] to <2 x i64>
+; CHECK-NEXT:    [[MAX:%.*]] = zext nneg <2 x i32> [[NARROW]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[MAX]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -318,7 +318,7 @@ define <2 x i64> @umax_sext_vec(<2 x i32> %a) {
 define i64 @umin_sext(i32 %a) {
 ; CHECK-LABEL: @umin_sext(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call i32 @llvm.umin.i32(i32 [[A:%.*]], i32 2)
-; CHECK-NEXT:    [[MIN:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[MIN:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[MIN]]
 ;
   %a_ext = sext i32 %a to i64
@@ -330,7 +330,7 @@ define i64 @umin_sext(i32 %a) {
 define <2 x i64> @umin_sext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umin_sext_vec(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call <2 x i32> @llvm.umin.v2i32(<2 x i32> [[A:%.*]], <2 x i32> <i32 2, i32 2>)
-; CHECK-NEXT:    [[MIN:%.*]] = zext <2 x i32> [[NARROW]] to <2 x i64>
+; CHECK-NEXT:    [[MIN:%.*]] = zext nneg <2 x i32> [[NARROW]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -366,7 +366,7 @@ define <2 x i64> @umax_sext2_vec(<2 x i32> %a) {
 define i64 @umin_sext2(i32 %a) {
 ; CHECK-LABEL: @umin_sext2(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call i32 @llvm.umin.i32(i32 [[A:%.*]], i32 3)
-; CHECK-NEXT:    [[MIN:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[MIN:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[MIN]]
 ;
   %a_ext = sext i32 %a to i64
@@ -378,7 +378,7 @@ define i64 @umin_sext2(i32 %a) {
 define <2 x i64> @umin_sext2_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umin_sext2_vec(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call <2 x i32> @llvm.umin.v2i32(<2 x i32> [[A:%.*]], <2 x i32> <i32 3, i32 3>)
-; CHECK-NEXT:    [[MIN:%.*]] = zext <2 x i32> [[NARROW]] to <2 x i64>
+; CHECK-NEXT:    [[MIN:%.*]] = zext nneg <2 x i32> [[NARROW]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
diff --git a/llvm/test/Transforms/InstCombine/cast-mul-select.ll b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
index ab8333beb9e76..23e934de0baeb 100644
--- a/llvm/test/Transforms/InstCombine/cast-mul-select.ll
+++ b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
@@ -193,7 +193,7 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
 ; CHECK-NEXT:    ]
 ; CHECK:       for.end:
 ; CHECK-NEXT:    [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ]
-; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[H]] to i32
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i8 [[H]] to i32
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[CONV]], [[A:%.*]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[EXIT]], label [[EXIT2:%.*]]
 ; CHECK:       exit2:
@@ -224,7 +224,7 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
 ; DBGINFO:       for.end:
 ; DBGINFO-NEXT:    [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ], !dbg [[DBG100:![0-9]+]]
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i8 [[H]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG100]]
-; DBGINFO-NEXT:    [[CONV:%.*]] = zext i8 [[H]] to i32, !dbg [[DBG101:![0-9]+]]
+; DBGINFO-NEXT:    [[CONV:%.*]] = zext nneg i8 [[H]] to i32, !dbg [[DBG101:![0-9]+]]
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i32 [[CONV]], metadata [[META92:![0-9]+]], metadata !DIExpression()), !dbg [[DBG101]]
 ; DBGINFO-NEXT:    [[CMP:%.*]] = icmp slt i32 [[CONV]], [[A:%.*]], !dbg [[DBG102:![0-9]+]]
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i1 [[CMP]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG102]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll b/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
index b3dafe06a3879..f70e48e273846 100644
--- a/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
@@ -289,7 +289,7 @@ define i1 @zext_sext_eq_known_nonneg(i8 %x, i8 %y) {
 define i1 @zext_sext_sle_known_nonneg_op0_narrow(i8 %x, i16 %y) {
 ; CHECK-LABEL: @zext_sext_sle_known_nonneg_op0_narrow(
 ; CHECK-NEXT:    [[N:%.*]] = and i8 [[X:%.*]], 12
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[N]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i8 [[N]] to i16
 ; CHECK-NEXT:    [[C:%.*]] = icmp sle i16 [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[C]]
 ;
@@ -370,7 +370,7 @@ define <2 x i1> @sext_zext_sge_known_nonneg_op0_narrow(<2 x i5> %x, <2 x i8> %y)
 define i1 @sext_zext_uge_known_nonneg_op0_wide(i16 %x, i8 %y) {
 ; CHECK-LABEL: @sext_zext_uge_known_nonneg_op0_wide(
 ; CHECK-NEXT:    [[N:%.*]] = and i8 [[Y:%.*]], 12
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[N]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i8 [[N]] to i16
 ; CHECK-NEXT:    [[C:%.*]] = icmp ule i16 [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT:    ret i1 [[C]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index ea9b16e1382ee..59e756eed3fd7 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -8,25 +8,25 @@ define float @test1(i32 %hash, float %x, float %y, float %z, float %w) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl i32 [[HASH:%.*]], 2
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], 124
-; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = zext nneg i32 [[TMP5]] to i64
 ; CHECK-NEXT:    [[TMP753:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP0]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = load float, ptr [[TMP753]], align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = fmul float [[TMP9]], [[X:%.*]]
 ; CHECK-NEXT:    [[TMP13:%.*]] = fadd float [[TMP11]], 0.000000e+00
 ; CHECK-NEXT:    [[TMP17_SUM52:%.*]] = or i32 [[TMP5]], 1
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP17_SUM52]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[TMP17_SUM52]] to i64
 ; CHECK-NEXT:    [[TMP1851:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP1]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = load float, ptr [[TMP1851]], align 4
 ; CHECK-NEXT:    [[TMP21:%.*]] = fmul float [[TMP19]], [[Y:%.*]]
 ; CHECK-NEXT:    [[TMP23:%.*]] = fadd float [[TMP21]], [[TMP13]]
 ; CHECK-NEXT:    [[TMP27_SUM50:%.*]] = or i32 [[TMP5]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP27_SUM50]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP27_SUM50]] to i64
 ; CHECK-NEXT:    [[TMP2849:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP29:%.*]] = load float, ptr [[TMP2849]], align 4
 ; CHECK-NEXT:    [[TMP31:%.*]] = fmul float [[TMP29]], [[Z:%.*]]
 ; CHECK-NEXT:    [[TMP33:%.*]] = fadd float [[TMP31]], [[TMP23]]
 ; CHECK-NEXT:    [[TMP37_SUM48:%.*]] = or i32 [[TMP5]], 3
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP37_SUM48]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP37_SUM48]] to i64
 ; CHECK-NEXT:    [[TMP3847:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP3]]
 ; CHECK-NEXT:    [[TMP39:%.*]] = load float, ptr [[TMP3847]], align 4
 ; CHECK-NEXT:    [[TMP41:%.*]] = fmul float [[TMP39]], [[W:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index 7a4da66ae2151..09003ebacd6ca 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -217,7 +217,7 @@ define i8 @umin_zext_uses(i5 %x, i5 %y) {
 define i8 @smax_sext_constant(i5 %x) {
 ; CHECK-LABEL: @smax_sext_constant(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i5 @llvm.smax.i5(i5 [[X:%.*]], i5 7)
-; CHECK-NEXT:    [[M:%.*]] = zext i5 [[TMP1]] to i8
+; CHECK-NEXT:    [[M:%.*]] = zext nneg i5 [[TMP1]] to i8
 ; CHECK-NEXT:    ret i8 [[M]]
 ;
   %e = sext i5 %x to i8
@@ -322,7 +322,7 @@ define i8 @umax_zext_constant_big(i5 %x) {
 define i8 @umin_sext_constant(i5 %x) {
 ; CHECK-LABEL: @umin_sext_constant(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i5 @llvm.umin.i5(i5 [[X:%.*]], i5 7)
-; CHECK-NEXT:    [[M:%.*]] = zext i5 [[TMP1]] to i8
+; CHECK-NEXT:    [[M:%.*]] = zext nneg i5 [[TMP1]] to i8
 ; CHECK-NEXT:    ret i8 [[M]]
 ;
   %e = sext i5 %x to i8
diff --git a/llvm/test/Transforms/InstCombine/narrow-math.ll b/llvm/test/Transforms/InstCombine/narrow-math.ll
index bfff00f62deac..6eacb1ca2c018 100644
--- a/llvm/test/Transforms/InstCombine/narrow-math.ll
+++ b/llvm/test/Transforms/InstCombine/narrow-math.ll
@@ -141,7 +141,7 @@ define i64 @test2(i32 %V) {
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[CALL2:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[CALL1]], [[CALL2]]
-; CHECK-NEXT:    [[ZEXT:%.*]] = zext i32 [[ADD]] to i64
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext nneg i32 [[ADD]] to i64
 ; CHECK-NEXT:    ret i64 [[ZEXT]]
 ;
   %call1 = call i32 @callee(), !range !0
@@ -172,7 +172,7 @@ define i64 @test4(i32 %V) {
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[CALL2:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[ADD:%.*]] = mul nuw nsw i32 [[CALL1]], [[CALL2]]
-; CHECK-NEXT:    [[ZEXT:%.*]] = zext i32 [[ADD]] to i64
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext nneg i32 [[ADD]] to i64
 ; CHECK-NEXT:    ret i64 [[ZEXT]]
 ;
   %call1 = call i32 @callee(), !range !0
@@ -480,7 +480,7 @@ define i64 @test12(i32 %V) {
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG1]]
 ; CHECK-NEXT:    [[CALL2:%.*]] = call i32 @callee(), !range [[RNG1]]
 ; CHECK-NEXT:    [[NARROW:%.*]] = mul nsw i32 [[CALL1]], [[CALL2]]
-; CHECK-NEXT:    [[ADD:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[ADD:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[ADD]]
 ;
   %call1 = call i32 @callee(), !range !1
@@ -614,7 +614,7 @@ define i64 @test18(i32 %V) {
 define i64 @test19(i32 %V) {
 ; CHECK-LABEL: @test19(
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG0]]
-; CHECK-NEXT:    [[SEXT1:%.*]] = zext i32 [[CALL1]] to i64
+; CHECK-NEXT:    [[SEXT1:%.*]] = zext nneg i32 [[CALL1]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i64 -2147481648, [[SEXT1]]
 ; CHECK-NEXT:    ret i64 [[SUB]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/select_meta.ll b/llvm/test/Transforms/InstCombine/select_meta.ll
index f788dec108dfb..df1e5a82ad5d1 100644
--- a/llvm/test/Transforms/InstCombine/select_meta.ll
+++ b/llvm/test/Transforms/InstCombine/select_meta.ll
@@ -64,7 +64,7 @@ define i32 @foo2(i32, i32) local_unnamed_addr #0 {
 define i64 @test43(i32 %a) nounwind {
 ; CHECK-LABEL: @test43(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call i32 @llvm.smax.i32(i32 [[A:%.*]], i32 0)
-; CHECK-NEXT:    [[MAX:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[MAX:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[MAX]]
 ;
   %a_ext = sext i32 %a to i64
diff --git a/llvm/test/Transforms/InstCombine/sext.ll b/llvm/test/Transforms/InstCombine/sext.ll
index c204b37ff85a5..0e7caff0cfdef 100644
--- a/llvm/test/Transforms/InstCombine/sext.ll
+++ b/llvm/test/Transforms/InstCombine/sext.ll
@@ -12,7 +12,7 @@ declare void @use_vec(<2 x i5>)
 define i64 @test1(i32 %x) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[T:%.*]] = call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[T]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[T]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %t = call i32 @llvm.ctpop.i32(i32 %x)
@@ -23,7 +23,7 @@ define i64 @test1(i32 %x) {
 define i64 @test2(i32 %x) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    [[T:%.*]] = call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 true), !range [[RNG0]]
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[T]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[T]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %t = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
@@ -34,7 +34,7 @@ define i64 @test2(i32 %x) {
 define i64 @test3(i32 %x) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:    [[T:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true), !range [[RNG0]]
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[T]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[T]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %t = call i32 @llvm.cttz.i32(i32 %x, i1 true)
@@ -45,7 +45,7 @@ define i64 @test3(i32 %x) {
 define i64 @test4(i32 %x) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:    [[T:%.*]] = udiv i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[T]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[T]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %t = udiv i32 %x, 3
@@ -56,7 +56,7 @@ define i64 @test4(i32 %x) {
 define i64 @test5(i32 %x) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    [[T:%.*]] = urem i32 [[X:%.*]], 30000
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[T]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[T]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %t = urem i32 %x, 30000
@@ -68,7 +68,7 @@ define i64 @test6(i32 %x) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    [[U:%.*]] = lshr i32 [[X:%.*]], 3
 ; CHECK-NEXT:    [[T:%.*]] = mul nuw nsw i32 [[U]], 3
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[T]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[T]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %u = lshr i32 %x, 3
@@ -81,7 +81,7 @@ define i64 @test7(i32 %x) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    [[T:%.*]] = and i32 [[X:%.*]], 511
 ; CHECK-NEXT:    [[U:%.*]] = sub nuw nsw i32 20000, [[T]]
-; CHECK-NEXT:    [[S:%.*]] = zext i32 [[U]] to i64
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i32 [[U]] to i64
 ; CHECK-NEXT:    ret i64 [[S]]
 ;
   %t = and i32 %x, 511
@@ -296,7 +296,7 @@ define i32 @test17(i1 %x) {
 define i32 @test18(i16 %x) {
 ; CHECK-LABEL: @test18(
 ; CHECK-NEXT:    [[SEL:%.*]] = call i16 @llvm.smax.i16(i16 [[X:%.*]], i16 0)
-; CHECK-NEXT:    [[EXT:%.*]] = zext i16 [[SEL]] to i32
+; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i16 [[SEL]] to i32
 ; CHECK-NEXT:    ret i32 [[EXT]]
 ;
   %cmp = icmp slt i16 %x, 0
diff --git a/llvm/test/Transforms/InstCombine/udiv-simplify.ll b/llvm/test/Transforms/InstCombine/udiv-simplify.ll
index 724170e376b35..a38d32d792550 100644
--- a/llvm/test/Transforms/InstCombine/udiv-simplify.ll
+++ b/llvm/test/Transforms/InstCombine/udiv-simplify.ll
@@ -27,7 +27,7 @@ define i64 @test1_PR2274(i32 %x, i32 %g) nounwind {
 ; CHECK-LABEL: @test1_PR2274(
 ; CHECK-NEXT:    [[Y:%.*]] = lshr i32 [[X:%.*]], 30
 ; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[Y]], [[G:%.*]]
-; CHECK-NEXT:    [[Z:%.*]] = zext i32 [[R]] to i64
+; CHECK-NEXT:    [[Z:%.*]] = zext nneg i32 [[R]] to i64
 ; CHECK-NEXT:    ret i64 [[Z]]
 ;
   %y = lshr i32 %x, 30
@@ -39,7 +39,7 @@ define i64 @test2_PR2274(i32 %x, i32 %v) nounwind {
 ; CHECK-LABEL: @test2_PR2274(
 ; CHECK-NEXT:    [[Y:%.*]] = lshr i32 [[X:%.*]], 31
 ; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[Y]], [[V:%.*]]
-; CHECK-NEXT:    [[Z:%.*]] = zext i32 [[R]] to i64
+; CHECK-NEXT:    [[Z:%.*]] = zext nneg i32 [[R]] to i64
 ; CHECK-NEXT:    ret i64 [[Z]]
 ;
   %y = lshr i32 %x, 31
diff --git a/llvm/test/Transforms/InstCombine/wcslen-1.ll b/llvm/test/Transforms/InstCombine/wcslen-1.ll
index 5d05cff6e54b8..4a9a4b9263202 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-1.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-1.ll
@@ -175,7 +175,7 @@ define i64 @test_no_simplify2_no_null_opt(i32 %x) #0 {
 define i64 @test_no_simplify3(i32 %x) {
 ; CHECK-LABEL: @test_no_simplify3(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[AND]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [13 x i32], ptr @null_hello_mid, i64 0, i64 [[TMP1]]
 ; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
@@ -189,7 +189,7 @@ define i64 @test_no_simplify3(i32 %x) {
 define i64 @test_no_simplify3_no_null_opt(i32 %x) #0 {
 ; CHECK-LABEL: @test_no_simplify3_no_null_opt(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[AND]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [13 x i32], ptr @null_hello_mid, i64 0, i64 [[TMP1]]
 ; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(ptr [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
diff --git a/llvm/test/Transforms/InstCombine/wcslen-3.ll b/llvm/test/Transforms/InstCombine/wcslen-3.ll
index c463b6b1e9526..6dc9534c4986e 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-3.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-3.ll
@@ -164,7 +164,7 @@ define i64 @test_no_simplify2(i16 %x) {
 define i64 @test_no_simplify3(i16 %x) {
 ; CHECK-LABEL: @test_no_simplify3(
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[AND]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i16 [[AND]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [13 x i16], ptr @null_hello_mid, i64 0, i64 [[TMP1]]
 ; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
index 83523545f84d0..4803b96642afd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
@@ -82,19 +82,19 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
 ; SCALAR_TAIL_FOLDING:       if.then:
 ; SCALAR_TAIL_FOLDING-NEXT:    [[MUL:%.*]] = shl nuw nsw i32 [[IX_024]], 1
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP22:%.*]] = zext i32 [[MUL]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP22:%.*]] = zext nneg i32 [[MUL]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP22]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP23:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ADD:%.*]] = or i32 [[MUL]], 1
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP24:%.*]] = zext i32 [[ADD]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP24:%.*]] = zext nneg i32 [[ADD]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP24]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP25:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    [[SPEC_SELECT_I:%.*]] = call i8 @llvm.smax.i8(i8 [[TMP23]], i8 [[TMP25]])
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP26:%.*]] = zext i32 [[MUL]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP26:%.*]] = zext nneg i32 [[MUL]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP26]]
 ; SCALAR_TAIL_FOLDING-NEXT:    store i8 [[SPEC_SELECT_I]], ptr [[ARRAYIDX6]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    [[SUB:%.*]] = sub i8 0, [[SPEC_SELECT_I]]
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP27:%.*]] = zext i32 [[ADD]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP27:%.*]] = zext nneg i32 [[ADD]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP27]]
 ; SCALAR_TAIL_FOLDING-NEXT:    store i8 [[SUB]], ptr [[ARRAYIDX11]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    br label [[FOR_INC]]
@@ -239,12 +239,12 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP8:%.*]] = zext <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP8]]
 ; SCALAR_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 1, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP9]], i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i64 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer))
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP10:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP11:%.*]] = or <vscale x 16 x i32> [[TMP7]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP12:%.*]] = zext <vscale x 16 x i32> [[TMP11]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP12:%.*]] = zext nneg <vscale x 16 x i32> [[TMP11]] to <vscale x 16 x i64>
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP12]]
 ; SCALAR_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 2, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP13]], i32 1, <vscale x 16 x i1> [[TMP10]])
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
@@ -262,14 +262,14 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING:       for.body:
 ; SCALAR_TAIL_FOLDING-NEXT:    [[IX_012:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[MUL:%.*]] = shl nuw nsw i32 [[IX_012]], 1
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP17:%.*]] = zext i32 [[MUL]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP17:%.*]] = zext nneg i32 [[MUL]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP17]]
 ; SCALAR_TAIL_FOLDING-NEXT:    store i8 1, ptr [[ARRAYIDX]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    [[CMP1:%.*]] = icmp ugt i32 [[IX_012]], [[CONV]]
 ; SCALAR_TAIL_FOLDING-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
 ; SCALAR_TAIL_FOLDING:       if.then:
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ADD:%.*]] = or i32 [[MUL]], 1
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP18:%.*]] = zext i32 [[ADD]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP18:%.*]] = zext nneg i32 [[ADD]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP18]]
 ; SCALAR_TAIL_FOLDING-NEXT:    store i8 2, ptr [[ARRAYIDX3]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    br label [[FOR_INC]]
@@ -303,12 +303,12 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP6:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP7:%.*]] = zext <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP7:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP7]]
 ; PREDICATED_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 1, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP8]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP9:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP10:%.*]] = or <vscale x 16 x i32> [[TMP6]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP11:%.*]] = zext <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP11:%.*]] = zext nneg <vscale x 16 x i32> [[TMP10]] to <vscale x 16 x i64>
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP11]]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP13:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
 ; PREDICATED_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 2, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP12]], i32 1, <vscale x 16 x i1> [[TMP13]])
@@ -404,12 +404,12 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP7:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP8:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP9:%.*]] = zext <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP9:%.*]] = zext nneg <vscale x 16 x i32> [[TMP7]] to <vscale x 16 x i64>
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP9]]
 ; SCALAR_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 1, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP10]], i32 1, <vscale x 16 x i1> [[TMP8]])
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP12:%.*]] = or <vscale x 16 x i32> [[TMP7]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP13:%.*]] = zext <vscale x 16 x i32> [[TMP12]] to <vscale x 16 x i64>
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP12]] to <vscale x 16 x i64>
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
 ; SCALAR_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 2, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP14]], i32 1, <vscale x 16 x i1> [[TMP11]])
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP15:%.*]] = call i32 @llvm.vscale.i32()
@@ -430,7 +430,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    [[CMP1:%.*]] = icmp ugt i32 [[IX_018]], [[CONV]]
 ; SCALAR_TAIL_FOLDING-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
 ; SCALAR_TAIL_FOLDING:       if.then:
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP18:%.*]] = zext i32 [[MUL]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP18:%.*]] = zext nneg i32 [[MUL]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP18]]
 ; SCALAR_TAIL_FOLDING-NEXT:    store i8 1, ptr [[ARRAYIDX]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    br label [[IF_END]]
@@ -439,7 +439,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    br i1 [[CMP4]], label [[IF_THEN6:%.*]], label [[FOR_INC]]
 ; SCALAR_TAIL_FOLDING:       if.then6:
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ADD:%.*]] = or i32 [[MUL]], 1
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP19:%.*]] = zext i32 [[ADD]] to i64
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP19:%.*]] = zext nneg i32 [[ADD]] to i64
 ; SCALAR_TAIL_FOLDING-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP19]]
 ; SCALAR_TAIL_FOLDING-NEXT:    store i8 2, ptr [[ARRAYIDX7]], align 1
 ; SCALAR_TAIL_FOLDING-NEXT:    br label [[FOR_INC]]
@@ -477,13 +477,13 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP6:%.*]] = shl nuw nsw <vscale x 16 x i32> [[VEC_IND]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP7:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP8:%.*]] = zext <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP8:%.*]] = zext nneg <vscale x 16 x i32> [[TMP6]] to <vscale x 16 x i64>
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP8]]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP10:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> zeroinitializer
 ; PREDICATED_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 1, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP9]], i32 1, <vscale x 16 x i1> [[TMP10]])
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP11:%.*]] = icmp ugt <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP12:%.*]] = or <vscale x 16 x i32> [[TMP6]], shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP13:%.*]] = zext <vscale x 16 x i32> [[TMP12]] to <vscale x 16 x i64>
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP13:%.*]] = zext nneg <vscale x 16 x i32> [[TMP12]] to <vscale x 16 x i64>
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP15:%.*]] = select <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i1> [[TMP11]], <vscale x 16 x i1> zeroinitializer
 ; PREDICATED_TAIL_FOLDING-NEXT:    call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 2, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x ptr> [[TMP14]], i32 1, <vscale x 16 x i1> [[TMP15]])
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 90ad054c5a22e..2df55bdf89a00 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -2083,7 +2083,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
 ; IND:       for.body:
 ; IND-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
 ; IND-NEXT:    [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[VAR4:%.*]], [[IF_END]] ]
-; IND-NEXT:    [[TMP16:%.*]] = zext i32 [[I]] to i64
+; IND-NEXT:    [[TMP16:%.*]] = zext nneg i32 [[I]] to i64
 ; IND-NEXT:    [[VAR0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]]
 ; IND-NEXT:    [[VAR1:%.*]] = load i32, ptr [[VAR0]], align 4
 ; IND-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
@@ -2173,7 +2173,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
 ; UNROLL:       for.body:
 ; UNROLL-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
 ; UNROLL-NEXT:    [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[VAR4:%.*]], [[IF_END]] ]
-; UNROLL-NEXT:    [[TMP26:%.*]] = zext i32 [[I]] to i64
+; UNROLL-NEXT:    [[TMP26:%.*]] = zext nneg i32 [[I]] to i64
 ; UNROLL-NEXT:    [[VAR0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP26]]
 ; UNROLL-NEXT:    [[VAR1:%.*]] = load i32, ptr [[VAR0]], align 4
 ; UNROLL-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
@@ -2397,7 +2397,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
 ; INTERLEAVE:       for.body:
 ; INTERLEAVE-NEXT:    [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
 ; INTERLEAVE-NEXT:    [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[VAR4:%.*]], [[IF_END]] ]
-; INTERLEAVE-NEXT:    [[TMP46:%.*]] = zext i32 [[I]] to i64
+; INTERLEAVE-NEXT:    [[TMP46:%.*]] = zext nneg i32 [[I]] to i64
 ; INTERLEAVE-NEXT:    [[VAR0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP46]]
 ; INTERLEAVE-NEXT:    [[VAR1:%.*]] = load i32, ptr [[VAR0]], align 4
 ; INTERLEAVE-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
diff --git a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
index ea863f7355ad9..522ebf9dcc04b 100644
--- a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
+++ b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
@@ -6,7 +6,7 @@ define void @test(ptr noundef %a, i32 noundef %beam) {
 ; CHECK-SAME: (ptr nocapture noundef writeonly [[A:%.*]], i32 noundef [[BEAM:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[MUL:%.*]] = shl nuw nsw i32 [[BEAM]], 1
-; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext nneg i32 [[MUL]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM]]
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.cond.cleanup:
@@ -20,7 +20,7 @@ define void @test(ptr noundef %a, i32 noundef %beam) {
 ; CHECK-NEXT:    br label [[FOR_INC]]
 ; CHECK:       if.else:
 ; CHECK-NEXT:    [[MUL2:%.*]] = shl nuw nsw i32 [[I_06]], 1
-; CHECK-NEXT:    [[IDXPROM3:%.*]] = zext i32 [[MUL2]] to i64
+; CHECK-NEXT:    [[IDXPROM3:%.*]] = zext nneg i32 [[MUL2]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM3]]
 ; CHECK-NEXT:    store i32 1, ptr [[ARRAYIDX4]], align 4
 ; CHECK-NEXT:    br label [[FOR_INC]]
diff --git a/llvm/test/Transforms/PhaseOrdering/lto-licm.ll b/llvm/test/Transforms/PhaseOrdering/lto-licm.ll
index 1a5a67d8241a1..763e266e6a382 100644
--- a/llvm/test/Transforms/PhaseOrdering/lto-licm.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lto-licm.ll
@@ -11,7 +11,7 @@ define void @hoist_fdiv(ptr %a, float %b) {
 ; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp eq i32 [[I_0]], 1024
 ; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_INC]]
 ; CHECK:       for.inc:
-; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I_0]] to i64
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext nneg i32 [[I_0]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast float [[TMP1]], [[TMP0]]
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
index 9a55e1eee5bd4..63934a2cc9646 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/getelementptr.ll
@@ -67,7 +67,7 @@ define i32 @getelementptr_4x32(ptr nocapture readonly %g, i32 %n, i32 %x, i32 %y
 ; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = add nsw <2 x i32> [[TMP4]], [[TMP0]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[TMP5]], i64 0
-; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = zext nneg i32 [[TMP6]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[G:%.*]], i64 [[TMP7]]
 ; CHECK-NEXT:    [[T6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[T6]], [[SUM_032]]
@@ -159,12 +159,12 @@ define i32 @getelementptr_2x32(ptr nocapture readonly %g, i32 %n, i32 %x, i32 %y
 ; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[SUM_032:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[ADD16]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[T4:%.*]] = shl nuw nsw i32 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[T4]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[T4]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[G:%.*]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[T6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[T6]], [[SUM_032]]
 ; CHECK-NEXT:    [[T7:%.*]] = or i32 [[T4]], 1
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[T7]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[T7]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[G]], i64 [[TMP3]]
 ; CHECK-NEXT:    [[T8:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
 ; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[ADD1]], [[T8]]
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll b/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll
index cbbf4d6e7be19..2069efd12d27a 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/AMDGPU/uniform-unswitch.ll
@@ -34,7 +34,7 @@ define amdgpu_kernel void @uniform_unswitch(ptr nocapture %out, i32 %n, i32 %x)
 ; CHECK-NEXT:    [[I_07:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
 ; CHECK-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[I_07]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = zext nneg i32 [[I_07]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT_GLOBAL]], i64 [[TMP0]]
 ; CHECK-NEXT:    store i32 [[I_07]], ptr addrspace(1) [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    br label [[FOR_INC]]