diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip index c0f4a06acbb8e..68cbf86041e38 100644 --- a/clang/test/Headers/__clang_hip_math.hip +++ b/clang/test/Headers/__clang_hip_math.hip @@ -2451,7 +2451,7 @@ extern "C" __device__ double test_modf(double x, double* y) { // CHECK-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ] // CHECK-NEXT: [[CONV_I:%.*]] = trunc i64 [[RETVAL_0_I_I]] to i32 // CHECK-NEXT: [[BF_VALUE_I:%.*]] = and i32 [[CONV_I]], 4194303 -// CHECK-NEXT: [[BF_SET9_I:%.*]] = or i32 [[BF_VALUE_I]], 2143289344 +// CHECK-NEXT: [[BF_SET9_I:%.*]] = or disjoint i32 [[BF_VALUE_I]], 2143289344 // CHECK-NEXT: [[TMP10:%.*]] = bitcast i32 [[BF_SET9_I]] to float // CHECK-NEXT: ret float [[TMP10]] // @@ -2549,7 +2549,7 @@ extern "C" __device__ float test_nanf(const char *tag) { // CHECK: _ZL3nanPKc.exit: // CHECK-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ] // CHECK-NEXT: [[BF_VALUE_I:%.*]] = and i64 [[RETVAL_0_I_I]], 2251799813685247 -// CHECK-NEXT: [[BF_SET9_I:%.*]] = or i64 [[BF_VALUE_I]], 9221120237041090560 +// CHECK-NEXT: [[BF_SET9_I:%.*]] = or disjoint i64 [[BF_VALUE_I]], 9221120237041090560 // CHECK-NEXT: [[TMP10:%.*]] = bitcast i64 [[BF_SET9_I]] to double // CHECK-NEXT: ret double [[TMP10]] // diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp index 4d19bd12d8f6f..df65b83f2f06c 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp @@ -264,6 +264,16 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, if (ShrinkDemandedConstant(I, 1, DemandedMask)) return I; + // Infer disjoint flag if no common bits are set. 
+ if (!cast<PossiblyDisjointInst>(I)->isDisjoint()) { + WithCache<const Value *> LHSCache(I->getOperand(0), LHSKnown), + RHSCache(I->getOperand(1), RHSKnown); + if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ.getWithInstruction(I))) { + cast<PossiblyDisjointInst>(I)->setIsDisjoint(true); + return I; + } + } + break; } case Instruction::Xor: { diff --git a/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll b/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll index 3081baa2db281..ccbafbb197b66 100644 --- a/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll +++ b/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll @@ -33,9 +33,9 @@ define i8 @foo(i8 %arg, i8 %arg1) { ; CHECK-NEXT: [[T4:%.*]] = and i8 [[ARG1]], 33 ; CHECK-NEXT: [[T5:%.*]] = sub nsw i8 40, [[T2]] ; CHECK-NEXT: [[T6:%.*]] = and i8 [[T5]], 84 -; CHECK-NEXT: [[T7:%.*]] = or i8 [[T4]], [[T6]] +; CHECK-NEXT: [[T7:%.*]] = or disjoint i8 [[T4]], [[T6]] ; CHECK-NEXT: [[T8:%.*]] = xor i8 [[T]], [[T3]] -; CHECK-NEXT: [[T9:%.*]] = or i8 [[T7]], [[T8]] +; CHECK-NEXT: [[T9:%.*]] = or disjoint i8 [[T7]], [[T8]] ; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[T8]], 2 ; CHECK-NEXT: [[T11:%.*]] = and i8 [[TMP1]], 32 ; CHECK-NEXT: [[T12:%.*]] = xor i8 [[T11]], [[T9]] diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll index b0b5f955c9fb0..db9eafe998ebb 100644 --- a/llvm/test/Transforms/InstCombine/add.ll +++ b/llvm/test/Transforms/InstCombine/add.ll @@ -764,7 +764,7 @@ define i32 @test29(i32 %x, i32 %y) { ; CHECK-NEXT: [[TMP_2:%.*]] = sub i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[TMP_7:%.*]] = and i32 [[X]], 63 ; CHECK-NEXT: [[TMP_9:%.*]] = and i32 [[TMP_2]], -64 -; CHECK-NEXT: [[TMP_10:%.*]] = or i32 [[TMP_7]], [[TMP_9]] +; CHECK-NEXT: [[TMP_10:%.*]] = or disjoint i32 [[TMP_7]], [[TMP_9]] ; CHECK-NEXT: ret i32 [[TMP_10]] ; %tmp.2 = sub i32 %x, %y @@ -1499,7 +1499,7 @@ define i8 @add_like_or_n1(i8 %x) { define i8 @add_like_or_t2_extrause(i8 %x) { ; CHECK-LABEL: @add_like_or_t2_extrause( ; CHECK-NEXT: [[I0:%.*]] = shl i8 [[X:%.*]], 4 -; CHECK-NEXT: [[I1:%.*]] = or i8 [[I0]], 15 +; CHECK-NEXT: [[I1:%.*]] = or disjoint i8 [[I0]], 15 ; CHECK-NEXT: call void @use(i8 [[I1]]) ; CHECK-NEXT: [[R:%.*]] = add i8 [[I0]], 57 ; CHECK-NEXT: ret i8 [[R]] @@ -2361,7 +2361,7 @@ define { i64, i64 } @PR57576(i64 noundef %x, i64 noundef %y, i64 noundef %z, i64 ; CHECK-NEXT: [[ZY:%.*]] = zext i64 [[Y:%.*]] to i128 ; CHECK-NEXT: [[ZZ:%.*]] = zext i64 [[Z:%.*]] to i128 ; CHECK-NEXT: [[SHY:%.*]] = shl nuw i128 [[ZY]], 64 -; CHECK-NEXT: [[XY:%.*]] = or i128 [[SHY]], [[ZX]] +; CHECK-NEXT: [[XY:%.*]] = or disjoint i128 [[SHY]], [[ZX]] ; CHECK-NEXT: [[SUB:%.*]] = sub i128 [[XY]], [[ZZ]] ; CHECK-NEXT: [[T:%.*]] = trunc i128 [[SUB]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i128 [[SUB]], 64 diff --git a/llvm/test/Transforms/InstCombine/and-or-not.ll b/llvm/test/Transforms/InstCombine/and-or-not.ll index 32a12199020f0..c896c8f100380 100644 --- a/llvm/test/Transforms/InstCombine/and-or-not.ll +++ b/llvm/test/Transforms/InstCombine/and-or-not.ll @@ -553,7 +553,7 @@ define i32 @or_to_nxor_multiuse(i32 %a, i32 %b) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[OR:%.*]] = or i32 [[A]], [[B]] ; CHECK-NEXT: [[NOTOR:%.*]] = xor i32 [[OR]], -1 -; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND]], [[NOTOR]] +; CHECK-NEXT: [[OR2:%.*]] = or disjoint i32 [[AND]], [[NOTOR]] ; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[AND]], [[NOTOR]] ; CHECK-NEXT: [[MUL2:%.*]] = mul i32 [[MUL1]], [[OR2]] ; CHECK-NEXT: ret i32 [[MUL2]] diff --git a/llvm/test/Transforms/InstCombine/and-or.ll
b/llvm/test/Transforms/InstCombine/and-or.ll index 631da498e6644..b4ef27607121d 100644 --- a/llvm/test/Transforms/InstCombine/and-or.ll +++ b/llvm/test/Transforms/InstCombine/and-or.ll @@ -217,7 +217,7 @@ define i8 @or_and2_or2(i8 %x) { ; CHECK-NEXT: [[X2:%.*]] = and i8 [[O2]], 66 ; CHECK-NEXT: call void @use(i8 [[X2]]) ; CHECK-NEXT: [[BITFIELD:%.*]] = and i8 [[X]], -8 -; CHECK-NEXT: [[R:%.*]] = or i8 [[BITFIELD]], 3 +; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[BITFIELD]], 3 ; CHECK-NEXT: ret i8 [[R]] ; %o1 = or i8 %x, 1 @@ -243,7 +243,7 @@ define <2 x i8> @or_and2_or2_splat(<2 x i8> %x) { ; CHECK-NEXT: [[X2:%.*]] = and <2 x i8> [[O2]], ; CHECK-NEXT: call void @use_vec(<2 x i8> [[X2]]) ; CHECK-NEXT: [[BITFIELD:%.*]] = and <2 x i8> [[X]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i8> [[BITFIELD]], +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i8> [[BITFIELD]], ; CHECK-NEXT: ret <2 x i8> [[R]] ; %o1 = or <2 x i8> %x, @@ -355,7 +355,7 @@ define i64 @or_or_and_complex(i64 %i) { ; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[I]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP1]], 71777214294589695 ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP2]], -71777214294589696 -; CHECK-NEXT: [[OR27:%.*]] = or i64 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[OR27:%.*]] = or disjoint i64 [[TMP3]], [[TMP4]] ; CHECK-NEXT: ret i64 [[OR27]] ; %1 = lshr i64 %i, 8 diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll index 386ee38070501..79857f3efbc18 100644 --- a/llvm/test/Transforms/InstCombine/and.ll +++ b/llvm/test/Transforms/InstCombine/and.ll @@ -2433,7 +2433,7 @@ define i8 @negate_lowbitmask_use2(i8 %x, i8 %y) { define i64 @test_and_or_constexpr_infloop() { ; CHECK-LABEL: @test_and_or_constexpr_infloop( ; CHECK-NEXT: [[AND:%.*]] = and i64 ptrtoint (ptr @g to i64), -8 -; CHECK-NEXT: [[OR:%.*]] = or i64 [[AND]], 1 +; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[AND]], 1 ; CHECK-NEXT: ret i64 [[OR]] ; %and = and i64 ptrtoint (ptr @g to i64), -8 diff --git a/llvm/test/Transforms/InstCombine/apint-shift.ll b/llvm/test/Transforms/InstCombine/apint-shift.ll index 377cc9978c5b7..05c3db70ce1ca 100644 --- a/llvm/test/Transforms/InstCombine/apint-shift.ll +++ b/llvm/test/Transforms/InstCombine/apint-shift.ll @@ -273,7 +273,7 @@ define i18 @test13(i18 %x) { define i35 @test14(i35 %A) { ; CHECK-LABEL: @test14( ; CHECK-NEXT: [[B:%.*]] = and i35 [[A:%.*]], -19760 -; CHECK-NEXT: [[C:%.*]] = or i35 [[B]], 19744 +; CHECK-NEXT: [[C:%.*]] = or disjoint i35 [[B]], 19744 ; CHECK-NEXT: ret i35 [[C]] ; %B = lshr i35 %A, 4 diff --git a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll index 45fd87be3c331..148963894b89f 100644 --- a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll +++ b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll @@ -365,7 +365,7 @@ define i8 @lshr_xor_or_good_mask(i8 %x, i8 %y) { ; CHECK-LABEL: @lshr_xor_or_good_mask( ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = lshr i8 [[TMP1]], 4 -; CHECK-NEXT: [[BW1:%.*]] = or i8 [[TMP2]], 48 +; CHECK-NEXT: [[BW1:%.*]] = or disjoint i8 [[TMP2]], 48 ; CHECK-NEXT: ret i8 [[BW1]] ; %shift1 = lshr i8 %x, 4 diff --git a/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll b/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll index 78f4550464681..c86dfde6ddece 100644 --- a/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll +++ b/llvm/test/Transforms/InstCombine/binop-of-displaced-shifts.ll @@ -271,7 +271,7 @@ define i8 
@mismatched_shifts(i8 %x) { ; CHECK-NEXT: [[SHIFT:%.*]] = shl i8 16, [[X]] ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[X]], 1 ; CHECK-NEXT: [[SHIFT2:%.*]] = lshr i8 3, [[ADD]] -; CHECK-NEXT: [[BINOP:%.*]] = or i8 [[SHIFT]], [[SHIFT2]] +; CHECK-NEXT: [[BINOP:%.*]] = or disjoint i8 [[SHIFT]], [[SHIFT2]] ; CHECK-NEXT: ret i8 [[BINOP]] ; %shift = shl i8 16, %x diff --git a/llvm/test/Transforms/InstCombine/bitcast-inselt-bitcast.ll b/llvm/test/Transforms/InstCombine/bitcast-inselt-bitcast.ll index b99111580277d..410a441f7778e 100644 --- a/llvm/test/Transforms/InstCombine/bitcast-inselt-bitcast.ll +++ b/llvm/test/Transforms/InstCombine/bitcast-inselt-bitcast.ll @@ -17,7 +17,7 @@ define i16 @insert0_v2i8(i16 %x, i8 %y) { ; LE-LABEL: @insert0_v2i8( ; LE-NEXT: [[TMP1:%.*]] = and i16 [[X:%.*]], -256 ; LE-NEXT: [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i16 -; LE-NEXT: [[R:%.*]] = or i16 [[TMP1]], [[TMP2]] +; LE-NEXT: [[R:%.*]] = or disjoint i16 [[TMP1]], [[TMP2]] ; LE-NEXT: ret i16 [[R]] ; %v = bitcast i16 %x to <2 x i8> @@ -33,7 +33,7 @@ define i16 @insert1_v2i8(i16 %x, i8 %y) { ; BE-LABEL: @insert1_v2i8( ; BE-NEXT: [[TMP1:%.*]] = and i16 [[X:%.*]], -256 ; BE-NEXT: [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i16 -; BE-NEXT: [[R:%.*]] = or i16 [[TMP1]], [[TMP2]] +; BE-NEXT: [[R:%.*]] = or disjoint i16 [[TMP1]], [[TMP2]] ; BE-NEXT: ret i16 [[R]] ; ; LE-LABEL: @insert1_v2i8( @@ -61,7 +61,7 @@ define i32 @insert0_v4i8(i32 %x, i8 %y) { ; LE-LABEL: @insert0_v4i8( ; LE-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], -256 ; LE-NEXT: [[TMP2:%.*]] = zext i8 [[Y:%.*]] to i32 -; LE-NEXT: [[R:%.*]] = or i32 [[TMP1]], [[TMP2]] +; LE-NEXT: [[R:%.*]] = or disjoint i32 [[TMP1]], [[TMP2]] ; LE-NEXT: ret i32 [[R]] ; %v = bitcast i32 %x to <4 x i8> @@ -100,7 +100,7 @@ define i64 @insert0_v4i16(i64 %x, i16 %y) { ; LE-LABEL: @insert0_v4i16( ; LE-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], -65536 ; LE-NEXT: [[TMP2:%.*]] = zext i16 [[Y:%.*]] to i64 -; LE-NEXT: [[R:%.*]] = or i64 [[TMP1]], [[TMP2]] +; LE-NEXT: [[R:%.*]] = or disjoint i64 [[TMP1]], [[TMP2]] ; LE-NEXT: ret i64 [[R]] ; %v = bitcast i64 %x to <4 x i16> @@ -131,7 +131,7 @@ define i64 @insert3_v4i16(i64 %x, i16 %y) { ; BE-LABEL: @insert3_v4i16( ; BE-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], -65536 ; BE-NEXT: [[TMP2:%.*]] = zext i16 [[Y:%.*]] to i64 -; BE-NEXT: [[R:%.*]] = or i64 [[TMP1]], [[TMP2]] +; BE-NEXT: [[R:%.*]] = or disjoint i64 [[TMP1]], [[TMP2]] ; BE-NEXT: ret i64 [[R]] ; ; LE-LABEL: @insert3_v4i16( diff --git a/llvm/test/Transforms/InstCombine/bitreverse.ll b/llvm/test/Transforms/InstCombine/bitreverse.ll index dca52e2c545e1..bf09ffe141012 100644 --- a/llvm/test/Transforms/InstCombine/bitreverse.ll +++ b/llvm/test/Transforms/InstCombine/bitreverse.ll @@ -243,7 +243,7 @@ define i8 @rev8_mul_and_lshr(i8 %0) { ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 139536 ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw i64 [[TMP2]], 32800 ; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], 558144 -; CHECK-NEXT: [[TMP7:%.*]] = or i64 [[TMP4]], [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[TMP4]], [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = mul nuw nsw i64 [[TMP7]], 65793 ; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP8]], 16 ; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8 diff --git a/llvm/test/Transforms/InstCombine/bswap.ll b/llvm/test/Transforms/InstCombine/bswap.ll index 631d02ad8d806..756e898b18eba 100644 --- a/llvm/test/Transforms/InstCombine/bswap.ll +++ b/llvm/test/Transforms/InstCombine/bswap.ll @@ -42,7 +42,7 @@ define i16 @test1_trunc(i32 %i) { ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[I:%.*]], 
24 ; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[I]], 8 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], 65280 -; CHECK-NEXT: [[T5:%.*]] = or i32 [[T1]], [[T4]] +; CHECK-NEXT: [[T5:%.*]] = or disjoint i32 [[T1]], [[T4]] ; CHECK-NEXT: [[T13:%.*]] = trunc i32 [[T5]] to i16 ; CHECK-NEXT: ret i16 [[T13]] ; @@ -59,7 +59,7 @@ define i16 @test1_trunc_extra_use(i32 %i) { ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[I:%.*]], 24 ; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[I]], 8 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], 65280 -; CHECK-NEXT: [[T5:%.*]] = or i32 [[T1]], [[T4]] +; CHECK-NEXT: [[T5:%.*]] = or disjoint i32 [[T1]], [[T4]] ; CHECK-NEXT: call void @extra_use(i32 [[T5]]) ; CHECK-NEXT: [[T13:%.*]] = trunc i32 [[T5]] to i16 ; CHECK-NEXT: ret i16 [[T13]] @@ -605,7 +605,7 @@ define i64 @bswap_and_mask_1(i64 %0) { ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP0:%.*]], 56 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP0]], 40 ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], 65280 -; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP4]], [[TMP2]] +; CHECK-NEXT: [[TMP5:%.*]] = or disjoint i64 [[TMP4]], [[TMP2]] ; CHECK-NEXT: ret i64 [[TMP5]] ; %2 = lshr i64 %0, 56 @@ -781,7 +781,7 @@ define i16 @trunc_bswap_i160(ptr %a0) { ; CHECK-NEXT: [[SH_DIFF:%.*]] = lshr i160 [[LOAD]], 120 ; CHECK-NEXT: [[TR_SH_DIFF:%.*]] = trunc i160 [[SH_DIFF]] to i16 ; CHECK-NEXT: [[SHL:%.*]] = and i16 [[TR_SH_DIFF]], -256 -; CHECK-NEXT: [[OR:%.*]] = or i16 [[AND1]], [[SHL]] +; CHECK-NEXT: [[OR:%.*]] = or disjoint i16 [[AND1]], [[SHL]] ; CHECK-NEXT: ret i16 [[OR]] ; %load = load i160, ptr %a0, align 4 diff --git a/llvm/test/Transforms/InstCombine/cast-mul-select.ll b/llvm/test/Transforms/InstCombine/cast-mul-select.ll index 1dd5066856ed5..581c10de35e4f 100644 --- a/llvm/test/Transforms/InstCombine/cast-mul-select.ll +++ b/llvm/test/Transforms/InstCombine/cast-mul-select.ll @@ -149,7 +149,7 @@ define i32 @eval_sext_multi_use_in_one_inst(i32 %x) { ; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i16 ; CHECK-NEXT: [[A:%.*]] = and i16 [[T]], 14 ; CHECK-NEXT: [[M:%.*]] = mul nuw nsw i16 [[A]], [[A]] -; CHECK-NEXT: [[O:%.*]] = or i16 [[M]], -32768 +; CHECK-NEXT: [[O:%.*]] = or disjoint i16 [[M]], -32768 ; CHECK-NEXT: [[R:%.*]] = sext i16 [[O]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -160,7 +160,7 @@ define i32 @eval_sext_multi_use_in_one_inst(i32 %x) { ; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[A]], metadata [[META77:![0-9]+]], metadata !DIExpression()), !dbg [[DBG82]] ; DBGINFO-NEXT: [[M:%.*]] = mul nuw nsw i16 [[A]], [[A]], !dbg [[DBG83:![0-9]+]] ; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[M]], metadata [[META78:![0-9]+]], metadata !DIExpression()), !dbg [[DBG83]] -; DBGINFO-NEXT: [[O:%.*]] = or i16 [[M]], -32768, !dbg [[DBG84:![0-9]+]] +; DBGINFO-NEXT: [[O:%.*]] = or disjoint i16 [[M]], -32768, !dbg [[DBG84:![0-9]+]] ; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i16 [[O]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG84]] ; DBGINFO-NEXT: [[R:%.*]] = sext i16 [[O]] to i32, !dbg [[DBG85:![0-9]+]] ; DBGINFO-NEXT: call void @llvm.dbg.value(metadata i32 [[R]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG85]] diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll index afa7ac45e96dc..1cda0e503ee93 100644 --- a/llvm/test/Transforms/InstCombine/cast.ll +++ b/llvm/test/Transforms/InstCombine/cast.ll @@ -467,7 +467,7 @@ define i16 @test40(i16 %a) { ; ALL-LABEL: @test40( ; ALL-NEXT: [[T21:%.*]] = lshr i16 [[A:%.*]], 9 ; ALL-NEXT: [[T5:%.*]] = shl i16 [[A]], 8 -; 
ALL-NEXT: [[T32:%.*]] = or i16 [[T21]], [[T5]] +; ALL-NEXT: [[T32:%.*]] = or disjoint i16 [[T21]], [[T5]] ; ALL-NEXT: ret i16 [[T32]] ; %t = zext i16 %a to i32 @@ -482,7 +482,7 @@ define <2 x i16> @test40vec(<2 x i16> %a) { ; ALL-LABEL: @test40vec( ; ALL-NEXT: [[T21:%.*]] = lshr <2 x i16> [[A:%.*]], ; ALL-NEXT: [[T5:%.*]] = shl <2 x i16> [[A]], -; ALL-NEXT: [[T32:%.*]] = or <2 x i16> [[T21]], [[T5]] +; ALL-NEXT: [[T32:%.*]] = or disjoint <2 x i16> [[T21]], [[T5]] ; ALL-NEXT: ret <2 x i16> [[T32]] ; %t = zext <2 x i16> %a to <2 x i32> @@ -497,7 +497,7 @@ define <2 x i16> @test40vec_nonuniform(<2 x i16> %a) { ; ALL-LABEL: @test40vec_nonuniform( ; ALL-NEXT: [[T21:%.*]] = lshr <2 x i16> [[A:%.*]], ; ALL-NEXT: [[T5:%.*]] = shl <2 x i16> [[A]], -; ALL-NEXT: [[T32:%.*]] = or <2 x i16> [[T21]], [[T5]] +; ALL-NEXT: [[T32:%.*]] = or disjoint <2 x i16> [[T21]], [[T5]] ; ALL-NEXT: ret <2 x i16> [[T32]] ; %t = zext <2 x i16> %a to <2 x i32> @@ -646,7 +646,7 @@ define i64 @test48(i8 %A1, i8 %a2) { ; ALL-LABEL: @test48( ; ALL-NEXT: [[Z2:%.*]] = zext i8 [[A1:%.*]] to i32 ; ALL-NEXT: [[C:%.*]] = shl nuw nsw i32 [[Z2]], 8 -; ALL-NEXT: [[D:%.*]] = or i32 [[C]], [[Z2]] +; ALL-NEXT: [[D:%.*]] = or disjoint i32 [[C]], [[Z2]] ; ALL-NEXT: [[E:%.*]] = zext nneg i32 [[D]] to i64 ; ALL-NEXT: ret i64 [[E]] ; @@ -690,7 +690,7 @@ define i64 @test51(i64 %A, i1 %cond) { ; ALL-NEXT: [[C:%.*]] = and i64 [[A:%.*]], 4294967294 ; ALL-NEXT: [[NOT_COND:%.*]] = xor i1 [[COND:%.*]], true ; ALL-NEXT: [[MASKSEL:%.*]] = zext i1 [[NOT_COND]] to i64 -; ALL-NEXT: [[E:%.*]] = or i64 [[C]], [[MASKSEL]] +; ALL-NEXT: [[E:%.*]] = or disjoint i64 [[C]], [[MASKSEL]] ; ALL-NEXT: [[SEXT:%.*]] = shl nuw i64 [[E]], 32 ; ALL-NEXT: [[F:%.*]] = ashr exact i64 [[SEXT]], 32 ; ALL-NEXT: ret i64 [[F]] @@ -707,7 +707,7 @@ define i32 @test52(i64 %A) { ; ALL-LABEL: @test52( ; ALL-NEXT: [[B:%.*]] = trunc i64 [[A:%.*]] to i32 ; ALL-NEXT: [[C:%.*]] = and i32 [[B]], 7224 -; ALL-NEXT: [[D:%.*]] = or i32 [[C]], 32962 +; ALL-NEXT: [[D:%.*]] = or disjoint i32 [[C]], 32962 ; ALL-NEXT: ret i32 [[D]] ; %B = trunc i64 %A to i16 @@ -720,7 +720,7 @@ define i32 @test52(i64 %A) { define i64 @test53(i32 %A) { ; ALL-LABEL: @test53( ; ALL-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 7224 -; ALL-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], 32962 +; ALL-NEXT: [[TMP2:%.*]] = or disjoint i32 [[TMP1]], 32962 ; ALL-NEXT: [[D:%.*]] = zext nneg i32 [[TMP2]] to i64 ; ALL-NEXT: ret i64 [[D]] ; @@ -735,7 +735,7 @@ define i32 @test54(i64 %A) { ; ALL-LABEL: @test54( ; ALL-NEXT: [[B:%.*]] = trunc i64 [[A:%.*]] to i32 ; ALL-NEXT: [[C:%.*]] = and i32 [[B]], 7224 -; ALL-NEXT: [[D:%.*]] = or i32 [[C]], -32574 +; ALL-NEXT: [[D:%.*]] = or disjoint i32 [[C]], -32574 ; ALL-NEXT: ret i32 [[D]] ; %B = trunc i64 %A to i16 @@ -749,7 +749,7 @@ define i64 @test55(i32 %A) { ; ALL-LABEL: @test55( ; ALL-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 7224 ; ALL-NEXT: [[C:%.*]] = zext nneg i32 [[TMP1]] to i64 -; ALL-NEXT: [[D:%.*]] = or i64 [[C]], -32574 +; ALL-NEXT: [[D:%.*]] = or disjoint i64 [[C]], -32574 ; ALL-NEXT: ret i64 [[D]] ; %B = trunc i32 %A to i16 @@ -814,7 +814,7 @@ define i64 @test58(i64 %A) { ; ALL-LABEL: @test58( ; ALL-NEXT: [[C:%.*]] = lshr i64 [[A:%.*]], 8 ; ALL-NEXT: [[D:%.*]] = and i64 [[C]], 16777087 -; ALL-NEXT: [[E:%.*]] = or i64 [[D]], 128 +; ALL-NEXT: [[E:%.*]] = or disjoint i64 [[D]], 128 ; ALL-NEXT: ret i64 [[E]] ; %B = trunc i64 %A to i32 @@ -832,7 +832,7 @@ define i64 @test59(i8 %A, i8 %B) { ; ALL-NEXT: [[E:%.*]] = and i64 [[D]], 48 ; ALL-NEXT: [[TMP1:%.*]] = lshr i8 [[B:%.*]], 4 ; ALL-NEXT: 
[[G:%.*]] = zext nneg i8 [[TMP1]] to i64 -; ALL-NEXT: [[H:%.*]] = or i64 [[E]], [[G]] +; ALL-NEXT: [[H:%.*]] = or disjoint i64 [[E]], [[G]] ; ALL-NEXT: ret i64 [[H]] ; %C = zext i8 %A to i32 @@ -2139,7 +2139,7 @@ define i32 @test95(i32 %x) { ; ALL-LABEL: @test95( ; ALL-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 6 ; ALL-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 2 -; ALL-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], 40 +; ALL-NEXT: [[TMP3:%.*]] = or disjoint i32 [[TMP2]], 40 ; ALL-NEXT: ret i32 [[TMP3]] ; %1 = trunc i32 %x to i8 diff --git a/llvm/test/Transforms/InstCombine/free-inversion.ll b/llvm/test/Transforms/InstCombine/free-inversion.ll index 851f9823cc692..5e5e65164f707 100644 --- a/llvm/test/Transforms/InstCombine/free-inversion.ll +++ b/llvm/test/Transforms/InstCombine/free-inversion.ll @@ -526,7 +526,7 @@ define i8 @lshr_not_nneg(i8 %x, i8 %y) { define i8 @lshr_not_nneg2(i8 %x) { ; CHECK-LABEL: @lshr_not_nneg2( ; CHECK-NEXT: [[SHR:%.*]] = lshr i8 [[X:%.*]], 1 -; CHECK-NEXT: [[SHR_NOT1:%.*]] = or i8 [[SHR]], -128 +; CHECK-NEXT: [[SHR_NOT1:%.*]] = or disjoint i8 [[SHR]], -128 ; CHECK-NEXT: ret i8 [[SHR_NOT1]] ; %x.not = xor i8 %x, -1 diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll index dd8cb2d153fda..772a052b3a4f8 100644 --- a/llvm/test/Transforms/InstCombine/funnel.ll +++ b/llvm/test/Transforms/InstCombine/funnel.ll @@ -361,7 +361,7 @@ define i32 @fshl_concat_i8_i24(i8 %x, i24 %y, ptr %addr) { ; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i8 [[X:%.*]] to i32 ; CHECK-NEXT: [[SLX:%.*]] = shl nuw i32 [[ZEXT_X]], 24 ; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i24 [[Y:%.*]] to i32 -; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: [[XY:%.*]] = or disjoint i32 [[SLX]], [[ZEXT_Y]] ; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[YX:%.*]] = call i32 @llvm.fshl.i32(i32 [[XY]], i32 [[XY]], i32 8) ; CHECK-NEXT: ret i32 [[YX]] @@ -381,7 +381,7 @@ define i32 @fshl_concat_i8_i8(i8 %x, i8 %y, ptr %addr) { ; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i8 [[X:%.*]] to i32 ; CHECK-NEXT: [[SLX:%.*]] = shl nuw nsw i32 [[ZEXT_X]], 13 ; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i8 [[Y:%.*]] to i32 -; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: [[XY:%.*]] = or disjoint i32 [[SLX]], [[ZEXT_Y]] ; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[YX:%.*]] = call i32 @llvm.fshl.i32(i32 [[XY]], i32 [[XY]], i32 19) ; CHECK-NEXT: ret i32 [[YX]] @@ -401,7 +401,7 @@ define i32 @fshl_concat_i8_i8_overlap(i8 %x, i8 %y, ptr %addr) { ; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i8 [[X:%.*]] to i32 ; CHECK-NEXT: [[SLX:%.*]] = shl i32 [[ZEXT_X]], 25 ; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i8 [[Y:%.*]] to i32 -; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: [[XY:%.*]] = or disjoint i32 [[SLX]], [[ZEXT_Y]] ; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[SLY:%.*]] = shl nuw nsw i32 [[ZEXT_Y]], 7 ; CHECK-NEXT: [[YX:%.*]] = or i32 [[SLY]], [[ZEXT_X]] @@ -426,7 +426,7 @@ define i32 @fshl_concat_i8_i8_drop(i8 %x, i8 %y, ptr %addr) { ; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y]] ; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[SLY:%.*]] = shl i32 [[ZEXT_Y]], 25 -; CHECK-NEXT: [[YX:%.*]] = or i32 [[SLY]], [[ZEXT_X]] +; CHECK-NEXT: [[YX:%.*]] = or disjoint i32 [[SLY]], [[ZEXT_X]] ; CHECK-NEXT: ret i32 [[YX]] ; ; Test sly drop. 
@@ -445,10 +445,10 @@ define i32 @fshl_concat_i8_i8_different_slot(i8 %x, i8 %y, ptr %addr) { ; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i8 [[X:%.*]] to i32 ; CHECK-NEXT: [[SLX:%.*]] = shl nuw nsw i32 [[ZEXT_X]], 9 ; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i8 [[Y:%.*]] to i32 -; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: [[XY:%.*]] = or disjoint i32 [[SLX]], [[ZEXT_Y]] ; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[SLY:%.*]] = shl nuw nsw i32 [[ZEXT_Y]], 22 -; CHECK-NEXT: [[YX:%.*]] = or i32 [[SLY]], [[ZEXT_X]] +; CHECK-NEXT: [[YX:%.*]] = or disjoint i32 [[SLY]], [[ZEXT_X]] ; CHECK-NEXT: ret i32 [[YX]] ; %zext.x = zext i8 %x to i32 @@ -483,7 +483,7 @@ define <2 x i32> @fshl_concat_vector(<2 x i8> %x, <2 x i24> %y, ptr %addr) { ; CHECK-NEXT: [[ZEXT_X:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32> ; CHECK-NEXT: [[SLX:%.*]] = shl nuw <2 x i32> [[ZEXT_X]], ; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext <2 x i24> [[Y:%.*]] to <2 x i32> -; CHECK-NEXT: [[XY:%.*]] = or <2 x i32> [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: [[XY:%.*]] = or disjoint <2 x i32> [[SLX]], [[ZEXT_Y]] ; CHECK-NEXT: store <2 x i32> [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[YX:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[XY]], <2 x i32> [[XY]], <2 x i32> ) ; CHECK-NEXT: ret <2 x i32> [[YX]] diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-and.ll b/llvm/test/Transforms/InstCombine/icmp-mul-and.ll index 3a202be58245f..d5f5641392c0c 100644 --- a/llvm/test/Transforms/InstCombine/icmp-mul-and.ll +++ b/llvm/test/Transforms/InstCombine/icmp-mul-and.ll @@ -251,7 +251,7 @@ define i1 @pr51551_2(i32 %x, i32 %y) { define i1 @pr51551_neg1(i32 %x, i32 %y) { ; CHECK-LABEL: @pr51551_neg1( ; CHECK-NEXT: [[T0:%.*]] = and i32 [[Y:%.*]], 4 -; CHECK-NEXT: [[T1:%.*]] = or i32 [[T0]], 1 +; CHECK-NEXT: [[T1:%.*]] = or disjoint i32 [[T0]], 1 ; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[T1]], [[X:%.*]] ; CHECK-NEXT: [[AND:%.*]] = and i32 [[MUL]], 7 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0 diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll index ef4f2bfecfd8e..fd61c8a301662 100644 --- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll +++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll @@ -358,7 +358,7 @@ define i1 @xor_ugt_2(i8 %xx, i8 %y, i8 %z) { ; CHECK-LABEL: @xor_ugt_2( ; CHECK-NEXT: [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[YZ:%.*]] = and i8 [[Y:%.*]], 63 -; CHECK-NEXT: [[Y1:%.*]] = or i8 [[YZ]], 64 +; CHECK-NEXT: [[Y1:%.*]] = or disjoint i8 [[YZ]], 64 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[Y1]] ; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[XOR]] ; CHECK-NEXT: ret i1 [[R]] @@ -385,7 +385,7 @@ define i1 @xor_ult(i8 %x) { define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @xor_sgt( ; CHECK-NEXT: [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[Y1:%.*]] = or <2 x i8> [[YZ]], +; CHECK-NEXT: [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]], ; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]] ; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]] ; CHECK-NEXT: ret <2 x i1> [[R]] @@ -400,7 +400,7 @@ define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) { define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @xor_sgt_fail_no_known_msb( ; CHECK-NEXT: [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[Y1:%.*]] = or <2 x i8> [[YZ]], +; CHECK-NEXT: [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]], ; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]] ; CHECK-NEXT: [[R:%.*]] = 
icmp sgt <2 x i8> [[XOR]], [[X]] ; CHECK-NEXT: ret <2 x i1> [[R]] diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll index 78ac730cf026e..1c7bb36f0d34c 100644 --- a/llvm/test/Transforms/InstCombine/icmp.ll +++ b/llvm/test/Transforms/InstCombine/icmp.ll @@ -3862,9 +3862,9 @@ define <8 x i1> @bitreverse_vec_ne(<8 x i16> %x, <8 x i16> %y) { define i1 @knownbits1(i8 %a, i8 %b) { ; CHECK-LABEL: @knownbits1( ; CHECK-NEXT: [[A1:%.*]] = and i8 [[A:%.*]], 1 -; CHECK-NEXT: [[A2:%.*]] = or i8 [[A1]], 4 +; CHECK-NEXT: [[A2:%.*]] = or disjoint i8 [[A1]], 4 ; CHECK-NEXT: [[B1:%.*]] = and i8 [[B:%.*]], 2 -; CHECK-NEXT: [[B2:%.*]] = or i8 [[B1]], 5 +; CHECK-NEXT: [[B2:%.*]] = or disjoint i8 [[B1]], 5 ; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A2]], [[B2]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -3879,9 +3879,9 @@ define i1 @knownbits1(i8 %a, i8 %b) { define i1 @knownbits2(i8 %a, i8 %b) { ; CHECK-LABEL: @knownbits2( ; CHECK-NEXT: [[A1:%.*]] = and i8 [[A:%.*]], 1 -; CHECK-NEXT: [[A2:%.*]] = or i8 [[A1]], 4 +; CHECK-NEXT: [[A2:%.*]] = or disjoint i8 [[A1]], 4 ; CHECK-NEXT: [[B1:%.*]] = and i8 [[B:%.*]], 2 -; CHECK-NEXT: [[B2:%.*]] = or i8 [[B1]], 5 +; CHECK-NEXT: [[B2:%.*]] = or disjoint i8 [[B1]], 5 ; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[A2]], [[B2]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -3896,9 +3896,9 @@ define i1 @knownbits2(i8 %a, i8 %b) { define i1 @knownbits3(i8 %a, i8 %b) { ; CHECK-LABEL: @knownbits3( ; CHECK-NEXT: [[A1:%.*]] = and i8 [[A:%.*]], 1 -; CHECK-NEXT: [[A2:%.*]] = or i8 [[A1]], 4 +; CHECK-NEXT: [[A2:%.*]] = or disjoint i8 [[A1]], 4 ; CHECK-NEXT: [[B1:%.*]] = and i8 [[B:%.*]], 2 -; CHECK-NEXT: [[B2:%.*]] = or i8 [[B1]], 5 +; CHECK-NEXT: [[B2:%.*]] = or disjoint i8 [[B1]], 5 ; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[B2]], [[A2]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -3913,9 +3913,9 @@ define i1 @knownbits3(i8 %a, i8 %b) { define <2 x i1> @knownbits4(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: @knownbits4( ; CHECK-NEXT: [[A1:%.*]] = and <2 x i8> [[A:%.*]], -; CHECK-NEXT: [[A2:%.*]] = or <2 x i8> [[A1]], +; CHECK-NEXT: [[A2:%.*]] = or disjoint <2 x i8> [[A1]], ; CHECK-NEXT: [[B1:%.*]] = and <2 x i8> [[B:%.*]], -; CHECK-NEXT: [[B2:%.*]] = or <2 x i8> [[B1]], +; CHECK-NEXT: [[B2:%.*]] = or disjoint <2 x i8> [[B1]], ; CHECK-NEXT: [[C:%.*]] = icmp ne <2 x i8> [[B2]], [[A2]] ; CHECK-NEXT: ret <2 x i1> [[C]] ; @@ -3932,9 +3932,9 @@ define <2 x i1> @knownbits4(<2 x i8> %a, <2 x i8> %b) { define i1 @knownbits5(i8 %a, i8 %b) { ; CHECK-LABEL: @knownbits5( ; CHECK-NEXT: [[A1:%.*]] = and i8 [[A:%.*]], -127 -; CHECK-NEXT: [[A2:%.*]] = or i8 [[A1]], 4 +; CHECK-NEXT: [[A2:%.*]] = or disjoint i8 [[A1]], 4 ; CHECK-NEXT: [[B1:%.*]] = and i8 [[B:%.*]], 2 -; CHECK-NEXT: [[B2:%.*]] = or i8 [[B1]], 5 +; CHECK-NEXT: [[B2:%.*]] = or disjoint i8 [[B1]], 5 ; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A2]], [[B2]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -3949,9 +3949,9 @@ define i1 @knownbits5(i8 %a, i8 %b) { define i1 @knownbits6(i8 %a, i8 %b) { ; CHECK-LABEL: @knownbits6( ; CHECK-NEXT: [[A1:%.*]] = and i8 [[A:%.*]], -127 -; CHECK-NEXT: [[A2:%.*]] = or i8 [[A1]], 4 +; CHECK-NEXT: [[A2:%.*]] = or disjoint i8 [[A1]], 4 ; CHECK-NEXT: [[B1:%.*]] = and i8 [[B:%.*]], 2 -; CHECK-NEXT: [[B2:%.*]] = or i8 [[B1]], 5 +; CHECK-NEXT: [[B2:%.*]] = or disjoint i8 [[B1]], 5 ; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[A2]], [[B2]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -3966,9 +3966,9 @@ define i1 @knownbits6(i8 %a, i8 %b) { define <2 x i1> @knownbits7(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: @knownbits7( ; CHECK-NEXT: [[A1:%.*]] = and <2 x 
i8> [[A:%.*]], -; CHECK-NEXT: [[A2:%.*]] = or <2 x i8> [[A1]], +; CHECK-NEXT: [[A2:%.*]] = or disjoint <2 x i8> [[A1]], ; CHECK-NEXT: [[B1:%.*]] = and <2 x i8> [[B:%.*]], -; CHECK-NEXT: [[B2:%.*]] = or <2 x i8> [[B1]], +; CHECK-NEXT: [[B2:%.*]] = or disjoint <2 x i8> [[B1]], ; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[B2]], [[A2]] ; CHECK-NEXT: ret <2 x i1> [[C]] ; @@ -3983,9 +3983,9 @@ define <2 x i1> @knownbits7(<2 x i8> %a, <2 x i8> %b) { define i1 @knownbits8(i8 %a, i8 %b) { ; CHECK-LABEL: @knownbits8( ; CHECK-NEXT: [[A1:%.*]] = and i8 [[A:%.*]], -127 -; CHECK-NEXT: [[A2:%.*]] = or i8 [[A1]], 4 +; CHECK-NEXT: [[A2:%.*]] = or disjoint i8 [[A1]], 4 ; CHECK-NEXT: [[B1:%.*]] = and i8 [[B:%.*]], 2 -; CHECK-NEXT: [[B2:%.*]] = or i8 [[B1]], 5 +; CHECK-NEXT: [[B2:%.*]] = or disjoint i8 [[B1]], 5 ; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[B2]], [[A2]] ; CHECK-NEXT: ret i1 [[C]] ; diff --git a/llvm/test/Transforms/InstCombine/logical-select.ll b/llvm/test/Transforms/InstCombine/logical-select.ll index 2bdb3021523fb..31848bc911892 100644 --- a/llvm/test/Transforms/InstCombine/logical-select.ll +++ b/llvm/test/Transforms/InstCombine/logical-select.ll @@ -770,7 +770,7 @@ define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> %cond, <8 x i3> %pc, <8 x i3 ; CHECK-NEXT: [[NOTT9:%.*]] = xor <8 x i3> [[T9]], ; CHECK-NEXT: [[T11:%.*]] = and <8 x i3> [[C]], [[NOTT9]] ; CHECK-NEXT: [[T12:%.*]] = and <8 x i3> [[T9]], [[D:%.*]] -; CHECK-NEXT: [[R:%.*]] = or <8 x i3> [[T11]], [[T12]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <8 x i3> [[T11]], [[T12]] ; CHECK-NEXT: ret <8 x i3> [[R]] ; %c = mul <8 x i3> %pc, %pc ; thwart complexity-based canonicalization @@ -836,7 +836,7 @@ define <2 x i64> @bitcast_fp_vec_cond(<2 x double> %s, <2 x i64> %c, <2 x i64> % ; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], ; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[NOTT9]], [[C:%.*]] ; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[T9]], [[D:%.*]] -; CHECK-NEXT: [[R:%.*]] = or <2 x i64> [[T11]], [[T12]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i64> [[T11]], [[T12]] ; CHECK-NEXT: ret <2 x i64> [[R]] ; %t9 = bitcast <2 x double> %s to <2 x i64> @@ -856,7 +856,7 @@ define <2 x i64> @bitcast_int_vec_cond(i1 %b, <2 x i64> %c, <2 x i64> %d) { ; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], ; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[NOTT9]], [[C:%.*]] ; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[T9]], [[D:%.*]] -; CHECK-NEXT: [[R:%.*]] = or <2 x i64> [[T11]], [[T12]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i64> [[T11]], [[T12]] ; CHECK-NEXT: ret <2 x i64> [[R]] ; %s = sext i1 %b to i128 diff --git a/llvm/test/Transforms/InstCombine/masked-merge-or.ll b/llvm/test/Transforms/InstCombine/masked-merge-or.ll index 1716c5eedcf39..7d67b2d495412 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-or.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-or.ll @@ -21,7 +21,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, %m @@ -36,7 +36,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) { ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or <2 x 
i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; %and = and <2 x i32> %x, %m @@ -51,7 +51,7 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) { ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %and = and <3 x i32> %x, %m @@ -69,7 +69,7 @@ define i32 @p_constmask(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 65280 @@ -82,7 +82,7 @@ define <2 x i32> @p_constmask_splatvec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @p_constmask_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; %and = and <2 x i32> %x, @@ -125,7 +125,7 @@ define i32 @p_constmask2(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 61440 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 61440 @@ -138,7 +138,7 @@ define <2 x i32> @p_constmask2_splatvec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @p_constmask2_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; %and = and <2 x i32> %x, @@ -185,7 +185,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %m, %x ; swapped order @@ -201,7 +201,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -217,7 +217,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, %m @@ -233,7 +233,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; 
CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -249,7 +249,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %m, %x ; swapped order @@ -265,7 +265,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -282,7 +282,7 @@ define i32 @p_commutative6(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -297,7 +297,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask_commutative( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 65280 @@ -319,7 +319,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[NEG]]) ; CHECK-NEXT: call void @use32(i32 [[AND1]]) @@ -339,7 +339,7 @@ define i32 @n0_constmask_oneuse(i32 %x, i32 %y) { ; CHECK-LABEL: @n0_constmask_oneuse( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[AND1]]) ; CHECK-NEXT: ret i32 [[RET]] diff --git a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll index cb747aece74af..4f0845c8c8f46 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll @@ -21,7 +21,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, %m @@ -36,7 +36,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) 
{ ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; %and = and <2 x i32> %x, %m @@ -51,7 +51,7 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) { ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], ; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; %and = and <3 x i32> %x, %m @@ -69,7 +69,7 @@ define i32 @p_constmask(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET1]] ; %and = and i32 %x, 65280 @@ -82,7 +82,7 @@ define <2 x i32> @p_constmask_splatvec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @p_constmask_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[RET1:%.*]] = or <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET1:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET1]] ; %and = and <2 x i32> %x, @@ -125,7 +125,7 @@ define i32 @p_constmask2(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 61440 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET1]] ; %and = and i32 %x, 61440 @@ -138,7 +138,7 @@ define <2 x i32> @p_constmask2_splatvec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @p_constmask2_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[RET1:%.*]] = or <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: [[RET1:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET1]] ; %and = and <2 x i32> %x, @@ -185,7 +185,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %m, %x ; swapped order @@ -201,7 +201,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -217,7 +217,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: 
[[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, %m @@ -233,7 +233,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -249,7 +249,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %m, %x ; swapped order @@ -265,7 +265,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -282,7 +282,7 @@ define i32 @p_commutative6(i32 %x, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; %y = call i32 @gen32() @@ -297,7 +297,7 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask_commutative( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND1]], [[AND]] +; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET1]] ; %and = and i32 %x, 65280 @@ -319,7 +319,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[NEG]]) ; CHECK-NEXT: call void @use32(i32 [[AND1]]) @@ -339,7 +339,7 @@ define i32 @n0_constmask_oneuse(i32 %x, i32 %y) { ; CHECK-LABEL: @n0_constmask_oneuse( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[AND1]]) ; CHECK-NEXT: ret i32 [[RET1]] diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll index 59e756eed3fd7..aeca0cd2924ea 100644 --- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll +++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll @@ -13,19 +13,19 @@ define float @test1(i32 %hash, float %x, float %y, float %z, float %w) { ; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[TMP753]], align 4 ; CHECK-NEXT: [[TMP11:%.*]] = fmul float [[TMP9]], [[X:%.*]] ; CHECK-NEXT: 
[[TMP13:%.*]] = fadd float [[TMP11]], 0.000000e+00 -; CHECK-NEXT: [[TMP17_SUM52:%.*]] = or i32 [[TMP5]], 1 +; CHECK-NEXT: [[TMP17_SUM52:%.*]] = or disjoint i32 [[TMP5]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TMP17_SUM52]] to i64 ; CHECK-NEXT: [[TMP1851:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP1]] ; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[TMP1851]], align 4 ; CHECK-NEXT: [[TMP21:%.*]] = fmul float [[TMP19]], [[Y:%.*]] ; CHECK-NEXT: [[TMP23:%.*]] = fadd float [[TMP21]], [[TMP13]] -; CHECK-NEXT: [[TMP27_SUM50:%.*]] = or i32 [[TMP5]], 2 +; CHECK-NEXT: [[TMP27_SUM50:%.*]] = or disjoint i32 [[TMP5]], 2 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP27_SUM50]] to i64 ; CHECK-NEXT: [[TMP2849:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP2]] ; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[TMP2849]], align 4 ; CHECK-NEXT: [[TMP31:%.*]] = fmul float [[TMP29]], [[Z:%.*]] ; CHECK-NEXT: [[TMP33:%.*]] = fadd float [[TMP31]], [[TMP23]] -; CHECK-NEXT: [[TMP37_SUM48:%.*]] = or i32 [[TMP5]], 3 +; CHECK-NEXT: [[TMP37_SUM48:%.*]] = or disjoint i32 [[TMP5]], 3 ; CHECK-NEXT: [[TMP3:%.*]] = zext nneg i32 [[TMP37_SUM48]] to i64 ; CHECK-NEXT: [[TMP3847:%.*]] = getelementptr [128 x float], ptr @C.0.1248, i64 0, i64 [[TMP3]] ; CHECK-NEXT: [[TMP39:%.*]] = load float, ptr [[TMP3847]], align 4 diff --git a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll index 74e962067d5ee..bc0bee8fc6039 100644 --- a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll +++ b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll @@ -155,7 +155,7 @@ define i33 @squared_demanded_2_low_bits(i33 %x) { define <2 x i8> @squared_demanded_2_low_bits_splat(<2 x i8> %x) { ; CHECK-LABEL: @squared_demanded_2_low_bits_splat( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[AND:%.*]] = or <2 x i8> [[TMP1]], +; CHECK-NEXT: [[AND:%.*]] = or disjoint <2 x i8> [[TMP1]], ; CHECK-NEXT: ret <2 x i8> [[AND]] ; %mul = mul <2 x i8> %x, %x diff --git a/llvm/test/Transforms/InstCombine/mul_full_32.ll b/llvm/test/Transforms/InstCombine/mul_full_32.ll index c4166ffb1ec8b..23d35115dbd1d 100644 --- a/llvm/test/Transforms/InstCombine/mul_full_32.ll +++ b/llvm/test/Transforms/InstCombine/mul_full_32.ll @@ -23,7 +23,7 @@ define { i64, i64 } @mul_full_64(i64 %x, i64 %y) { ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 ; CHECK-NEXT: [[U1H:%.*]] = lshr i64 [[U1]], 32 ; CHECK-NEXT: [[U2:%.*]] = add i64 [[U0H]], [[T3]] -; CHECK-NEXT: [[LO:%.*]] = or i64 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[U1LS]], [[T0L]] ; CHECK-NEXT: [[HI:%.*]] = add i64 [[U2]], [[U1H]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i64, i64 } undef, i64 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i64, i64 } [[RES_LO]], i64 [[HI]], 1 @@ -79,7 +79,7 @@ define { i32, i32 } @mul_full_32(i32 %x, i32 %y) { ; CHECK-NEXT: [[U1LS:%.*]] = shl i32 [[U1]], 16 ; CHECK-NEXT: [[U1H:%.*]] = lshr i32 [[U1]], 16 ; CHECK-NEXT: [[U2:%.*]] = add i32 [[U0H]], [[T3]] -; CHECK-NEXT: [[LO:%.*]] = or i32 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i32 [[U1LS]], [[T0L]] ; CHECK-NEXT: [[HI:%.*]] = add i32 [[U2]], [[U1H]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i32, i32 } undef, i32 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i32, i32 } [[RES_LO]], i32 [[HI]], 1 diff --git a/llvm/test/Transforms/InstCombine/mul_full_64.ll b/llvm/test/Transforms/InstCombine/mul_full_64.ll index 8a57b548cd14b..eb652f3f8a1d0 100644 --- 
a/llvm/test/Transforms/InstCombine/mul_full_64.ll +++ b/llvm/test/Transforms/InstCombine/mul_full_64.ll @@ -23,7 +23,7 @@ define { i64, i64 } @mul_full_64_variant0(i64 %x, i64 %y) { ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 ; CHECK-NEXT: [[U1H:%.*]] = lshr i64 [[U1]], 32 ; CHECK-NEXT: [[U2:%.*]] = add i64 [[U0H]], [[T3]] -; CHECK-NEXT: [[LO:%.*]] = or i64 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[U1LS]], [[T0L]] ; CHECK-NEXT: [[HI:%.*]] = add i64 [[U2]], [[U1H]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i64, i64 } undef, i64 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i64, i64 } [[RES_LO]], i64 [[HI]], 1 @@ -151,7 +151,7 @@ define i64 @mul_full_64_variant2(i64 %a, i64 %b, ptr nocapture %rhi) { ; CHECK-NEXT: store i64 [[ADD17]], ptr [[RHI:%.*]], align 8 ; CHECK-NEXT: [[CONV24:%.*]] = shl i64 [[ADD15]], 32 ; CHECK-NEXT: [[CONV26:%.*]] = and i64 [[MUL7]], 4294967295 -; CHECK-NEXT: [[ADD27:%.*]] = or i64 [[CONV24]], [[CONV26]] +; CHECK-NEXT: [[ADD27:%.*]] = or disjoint i64 [[CONV24]], [[CONV26]] ; CHECK-NEXT: ret i64 [[ADD27]] ; %conv = and i64 %a, 4294967295 @@ -242,7 +242,7 @@ define { i32, i32 } @mul_full_32(i32 %x, i32 %y) { ; CHECK-NEXT: [[U1LS:%.*]] = shl i32 [[U1]], 16 ; CHECK-NEXT: [[U1H:%.*]] = lshr i32 [[U1]], 16 ; CHECK-NEXT: [[U2:%.*]] = add i32 [[U0H]], [[T3]] -; CHECK-NEXT: [[LO:%.*]] = or i32 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i32 [[U1LS]], [[T0L]] ; CHECK-NEXT: [[HI:%.*]] = add i32 [[U2]], [[U1H]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i32, i32 } undef, i32 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i32, i32 } [[RES_LO]], i32 [[HI]], 1 @@ -308,7 +308,7 @@ define { i64, i64 } @mul_full_64_variant0_1() { ; CHECK-NEXT: [[HI:%.*]] = add i64 [[U2]], [[U1H]] ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 ; CHECK-NEXT: [[T0L:%.*]] = and i64 [[T0]], 4294967295 -; CHECK-NEXT: [[LO:%.*]] = or i64 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[U1LS]], [[T0L]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i64, i64 } undef, i64 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i64, i64 } [[RES_LO]], i64 [[HI]], 1 ; CHECK-NEXT: ret { i64, i64 } [[RES]] @@ -366,7 +366,7 @@ define { i64, i64 } @mul_full_64_variant0_2() { ; CHECK-NEXT: [[HI:%.*]] = add i64 [[U1H]], [[U2]] ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 ; CHECK-NEXT: [[T0L:%.*]] = and i64 [[T0]], 4294967295 -; CHECK-NEXT: [[LO:%.*]] = or i64 [[T0L]], [[U1LS]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[T0L]], [[U1LS]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i64, i64 } undef, i64 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i64, i64 } [[RES_LO]], i64 [[HI]], 1 ; CHECK-NEXT: ret { i64, i64 } [[RES]] @@ -463,7 +463,7 @@ define i64 @mullo(i64 %x, i64 %y) { ; CHECK-NEXT: [[U0:%.*]] = add i64 [[T0H]], [[T1]] ; CHECK-NEXT: [[U1:%.*]] = add i64 [[U0]], [[T2]] ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 -; CHECK-NEXT: [[LO:%.*]] = or i64 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[U1LS]], [[T0L]] ; CHECK-NEXT: ret i64 [[LO]] ; %xl = and i64 %x, 4294967295 @@ -530,7 +530,7 @@ define i64 @mullo_duplicate(i64 %x, i64 %y) { ; CHECK-NEXT: [[U0:%.*]] = add i64 [[T0H]], [[T1]] ; CHECK-NEXT: [[U1:%.*]] = add i64 [[U0]], [[T2]] ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 -; CHECK-NEXT: [[LO:%.*]] = or i64 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[U1LS]], [[T0L]] ; CHECK-NEXT: ret i64 [[LO]] ; %duplicated_mul = mul i64 %x, %y @@ -581,7 +581,7 @@ define { i64, i64 } 
@mul_full_64_duplicate(i64 %x, i64 %y) { ; CHECK-NEXT: [[U1LS:%.*]] = shl i64 [[U1]], 32 ; CHECK-NEXT: [[U1H:%.*]] = lshr i64 [[U1]], 32 ; CHECK-NEXT: [[U2:%.*]] = add i64 [[U0H]], [[T3]] -; CHECK-NEXT: [[LO:%.*]] = or i64 [[U1LS]], [[T0L]] +; CHECK-NEXT: [[LO:%.*]] = or disjoint i64 [[U1LS]], [[T0L]] ; CHECK-NEXT: [[HI:%.*]] = add i64 [[U2]], [[U1H]] ; CHECK-NEXT: [[RES_LO:%.*]] = insertvalue { i64, i64 } undef, i64 [[LO]], 0 ; CHECK-NEXT: [[RES:%.*]] = insertvalue { i64, i64 } [[RES_LO]], i64 [[HI]], 1 diff --git a/llvm/test/Transforms/InstCombine/or-concat.ll b/llvm/test/Transforms/InstCombine/or-concat.ll index 46a5be83f7e3d..dfc3f0631773a 100644 --- a/llvm/test/Transforms/InstCombine/or-concat.ll +++ b/llvm/test/Transforms/InstCombine/or-concat.ll @@ -86,7 +86,7 @@ define i64 @concat_bswap32_binary(i32 %a0, i32 %a1) { ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A1:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 32 -; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP4]]) ; CHECK-NEXT: ret i64 [[TMP5]] ; @@ -104,7 +104,7 @@ define <2 x i64> @concat_bswap32_binary_vector(<2 x i32> %a0, <2 x i32> %a1) { ; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A1:%.*]] to <2 x i64> ; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A0:%.*]] to <2 x i64> ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP2]], -; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint <2 x i64> [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[TMP4]]) ; CHECK-NEXT: ret <2 x i64> [[TMP5]] ; @@ -197,7 +197,7 @@ define i64 @concat_bitreverse32_binary(i32 %a0, i32 %a1) { ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A1:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 32 -; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[TMP4]]) ; CHECK-NEXT: ret i64 [[TMP5]] ; @@ -215,7 +215,7 @@ define <2 x i64> @concat_bitreverse32_binary_vector(<2 x i32> %a0, <2 x i32> %a1 ; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[A1:%.*]] to <2 x i64> ; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i32> [[A0:%.*]] to <2 x i64> ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw <2 x i64> [[TMP2]], -; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = or disjoint <2 x i64> [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> [[TMP4]]) ; CHECK-NEXT: ret <2 x i64> [[TMP5]] ; diff --git a/llvm/test/Transforms/InstCombine/or-shifted-masks.ll b/llvm/test/Transforms/InstCombine/or-shifted-masks.ll index e7a209a3f6c39..3d7ca9c1fb123 100644 --- a/llvm/test/Transforms/InstCombine/or-shifted-masks.ll +++ b/llvm/test/Transforms/InstCombine/or-shifted-masks.ll @@ -7,7 +7,7 @@ define i32 @or_and_shifts1(i32 %x) { ; CHECK-NEXT: [[I1:%.*]] = and i32 [[I]], 8 ; CHECK-NEXT: [[I2:%.*]] = shl i32 [[X]], 5 ; CHECK-NEXT: [[I3:%.*]] = and i32 [[I2]], 32 -; CHECK-NEXT: [[I4:%.*]] = or i32 [[I1]], [[I3]] +; CHECK-NEXT: [[I4:%.*]] = or disjoint i32 [[I1]], [[I3]] ; CHECK-NEXT: ret i32 [[I4]] ; %i = shl i32 %x, 3 @@ -24,7 +24,7 @@ define i32 @or_and_shifts2(i32 %x) { ; CHECK-NEXT: [[I1:%.*]] = and i32 [[I]], 896 ; CHECK-NEXT: [[I2:%.*]] = lshr 
i32 [[X]], 4 ; CHECK-NEXT: [[I3:%.*]] = and i32 [[I2]], 7 -; CHECK-NEXT: [[I4:%.*]] = or i32 [[I1]], [[I3]] +; CHECK-NEXT: [[I4:%.*]] = or disjoint i32 [[I1]], [[I3]] ; CHECK-NEXT: ret i32 [[I4]] ; %i = shl i32 %x, 3 @@ -60,8 +60,8 @@ define i32 @multiuse1(i32 %x) { ; CHECK-NEXT: [[I5:%.*]] = and i32 [[I1]], 2 ; CHECK-NEXT: [[I21:%.*]] = shl i32 [[X]], 6 ; CHECK-NEXT: [[I6:%.*]] = and i32 [[I21]], 384 -; CHECK-NEXT: [[I7:%.*]] = or i32 [[I3]], [[I5]] -; CHECK-NEXT: [[I8:%.*]] = or i32 [[I7]], [[I6]] +; CHECK-NEXT: [[I7:%.*]] = or disjoint i32 [[I3]], [[I5]] +; CHECK-NEXT: [[I8:%.*]] = or disjoint i32 [[I7]], [[I6]] ; CHECK-NEXT: ret i32 [[I8]] ; %i = and i32 %x, 2 @@ -86,9 +86,9 @@ define i32 @multiuse2(i32 %x) { ; CHECK-NEXT: [[I8:%.*]] = and i32 [[I6]], 192 ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X]], 8 ; CHECK-NEXT: [[I10:%.*]] = and i32 [[TMP1]], 32256 -; CHECK-NEXT: [[I11:%.*]] = or i32 [[I8]], [[I5]] -; CHECK-NEXT: [[I12:%.*]] = or i32 [[I2]], [[I11]] -; CHECK-NEXT: [[I13:%.*]] = or i32 [[I10]], [[I12]] +; CHECK-NEXT: [[I11:%.*]] = or disjoint i32 [[I8]], [[I5]] +; CHECK-NEXT: [[I12:%.*]] = or disjoint i32 [[I2]], [[I11]] +; CHECK-NEXT: [[I13:%.*]] = or disjoint i32 [[I10]], [[I12]] ; CHECK-NEXT: ret i32 [[I13]] ; %i = and i32 %x, 6 @@ -116,8 +116,8 @@ define i32 @multiuse3(i32 %x) { ; CHECK-NEXT: [[I5:%.*]] = and i32 [[TMP1]], 8064 ; CHECK-NEXT: [[I6:%.*]] = lshr i32 [[X]], 1 ; CHECK-NEXT: [[I7:%.*]] = and i32 [[I6]], 15 -; CHECK-NEXT: [[I8:%.*]] = or i32 [[I2]], [[I7]] -; CHECK-NEXT: [[I9:%.*]] = or i32 [[I8]], [[I5]] +; CHECK-NEXT: [[I8:%.*]] = or disjoint i32 [[I2]], [[I7]] +; CHECK-NEXT: [[I9:%.*]] = or disjoint i32 [[I8]], [[I5]] ; CHECK-NEXT: ret i32 [[I9]] ; %i = and i32 %x, 96 @@ -142,7 +142,7 @@ define i32 @multiuse4(i32 %x) local_unnamed_addr { ; CHECK-NEXT: [[I2:%.*]] = and i32 [[I]], 24 ; CHECK-NEXT: [[I3:%.*]] = lshr i32 [[X]], 22 ; CHECK-NEXT: [[I4:%.*]] = and i32 [[I3]], 480 -; CHECK-NEXT: [[I5:%.*]] = or i32 [[I4]], [[I2]] +; CHECK-NEXT: [[I5:%.*]] = or disjoint i32 [[I4]], [[I2]] ; CHECK-NEXT: br label [[END:%.*]] ; CHECK: else: ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X]], 17 @@ -184,13 +184,13 @@ define i32 @multiuse5(i32 %x) local_unnamed_addr { ; CHECK-NEXT: [[I2:%.*]] = and i32 [[I]], 21760 ; CHECK-NEXT: [[I3:%.*]] = shl i32 [[X]], 5 ; CHECK-NEXT: [[I4:%.*]] = and i32 [[I3]], 43520 -; CHECK-NEXT: [[I5:%.*]] = or i32 [[I4]], [[I2]] +; CHECK-NEXT: [[I5:%.*]] = or disjoint i32 [[I4]], [[I2]] ; CHECK-NEXT: br label [[END:%.*]] ; CHECK: else: ; CHECK-NEXT: [[I6:%.*]] = and i32 [[I]], 5570560 ; CHECK-NEXT: [[I7:%.*]] = shl i32 [[X]], 5 ; CHECK-NEXT: [[I8:%.*]] = and i32 [[I7]], 11141120 -; CHECK-NEXT: [[I9:%.*]] = or i32 [[I8]], [[I6]] +; CHECK-NEXT: [[I9:%.*]] = or disjoint i32 [[I8]], [[I6]] ; CHECK-NEXT: br label [[END]] ; CHECK: end: ; CHECK-NEXT: [[I10:%.*]] = phi i32 [ [[I5]], [[IF]] ], [ [[I9]], [[ELSE]] ] @@ -223,7 +223,7 @@ define i32 @shl_mask(i32 %x) { ; CHECK-LABEL: @shl_mask( ; CHECK-NEXT: [[Z:%.*]] = and i32 [[X:%.*]], 255 ; CHECK-NEXT: [[S:%.*]] = shl nuw nsw i32 [[Z]], 8 -; CHECK-NEXT: [[R:%.*]] = or i32 [[Z]], [[S]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i32 [[Z]], [[S]] ; CHECK-NEXT: ret i32 [[R]] ; %z = and i32 %x, 255 @@ -249,7 +249,7 @@ define i37 @shl_mask_weird_type(i37 %x) { ; CHECK-LABEL: @shl_mask_weird_type( ; CHECK-NEXT: [[Z:%.*]] = and i37 [[X:%.*]], 255 ; CHECK-NEXT: [[S:%.*]] = shl nuw nsw i37 [[Z]], 8 -; CHECK-NEXT: [[R:%.*]] = or i37 [[Z]], [[S]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i37 [[Z]], [[S]] ; CHECK-NEXT: ret i37 [[R]] ; %z = 
and i37 %x, 255 @@ -263,7 +263,7 @@ define i32 @shl_mask_extra_use(i32 %x, ptr %p) { ; CHECK-NEXT: [[Z:%.*]] = and i32 [[X:%.*]], 255 ; CHECK-NEXT: [[S:%.*]] = shl nuw nsw i32 [[Z]], 8 ; CHECK-NEXT: store i32 [[S]], ptr [[P:%.*]], align 4 -; CHECK-NEXT: [[R:%.*]] = or i32 [[Z]], [[S]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i32 [[Z]], [[S]] ; CHECK-NEXT: ret i32 [[R]] ; %z = and i32 %x, 255 diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll index 6a56e54fe6ff2..8bf8c6fcd928b 100644 --- a/llvm/test/Transforms/InstCombine/or.ll +++ b/llvm/test/Transforms/InstCombine/or.ll @@ -160,7 +160,7 @@ define i32 @test21(i32 %t1) { ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[T1:%.*]], -2 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[TMP1]], 2 ; CHECK-NEXT: [[T5:%.*]] = and i32 [[T1]], 1 -; CHECK-NEXT: [[T6:%.*]] = or i32 [[T5]], [[T3]] +; CHECK-NEXT: [[T6:%.*]] = or disjoint i32 [[T5]], [[T3]] ; CHECK-NEXT: ret i32 [[T6]] ; %t1.mask1 = add i32 %t1, 2 @@ -388,7 +388,7 @@ define <2 x i1> @test29vec(<2 x ptr> %A, <2 x ptr> %B) { define i32 @test30(i32 %A) { ; CHECK-LABEL: @test30( ; CHECK-NEXT: [[D:%.*]] = and i32 [[A:%.*]], -58312 -; CHECK-NEXT: [[E:%.*]] = or i32 [[D]], 32962 +; CHECK-NEXT: [[E:%.*]] = or disjoint i32 [[D]], 32962 ; CHECK-NEXT: ret i32 [[E]] ; %B = or i32 %A, 32962 ; 0b1000_0000_1100_0010 @@ -401,7 +401,7 @@ define i32 @test30(i32 %A) { define <2 x i32> @test30vec(<2 x i32> %A) { ; CHECK-LABEL: @test30vec( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], -; CHECK-NEXT: [[E:%.*]] = or <2 x i32> [[TMP1]], +; CHECK-NEXT: [[E:%.*]] = or disjoint <2 x i32> [[TMP1]], ; CHECK-NEXT: ret <2 x i32> [[E]] ; %B = or <2 x i32> %A, @@ -415,7 +415,7 @@ define <2 x i32> @test30vec(<2 x i32> %A) { define i64 @test31(i64 %A) { ; CHECK-LABEL: @test31( ; CHECK-NEXT: [[E:%.*]] = and i64 [[A:%.*]], 4294908984 -; CHECK-NEXT: [[F:%.*]] = or i64 [[E]], 32962 +; CHECK-NEXT: [[F:%.*]] = or disjoint i64 [[E]], 32962 ; CHECK-NEXT: ret i64 [[F]] ; %B = or i64 %A, 194 @@ -431,7 +431,7 @@ define i64 @test31(i64 %A) { define <2 x i64> @test31vec(<2 x i64> %A) { ; CHECK-LABEL: @test31vec( ; CHECK-NEXT: [[E:%.*]] = and <2 x i64> [[A:%.*]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i64> [[E]], +; CHECK-NEXT: [[F:%.*]] = or disjoint <2 x i64> [[E]], ; CHECK-NEXT: ret <2 x i64> [[F]] ; %B = or <2 x i64> %A, @@ -1519,7 +1519,7 @@ define i32 @mul_no_common_bits_uses(i32 %p1, i32 %p2) { ; CHECK-NEXT: [[Y:%.*]] = shl i32 [[P2:%.*]], 3 ; CHECK-NEXT: [[M:%.*]] = mul i32 [[X]], [[Y]] ; CHECK-NEXT: call void @use(i32 [[M]]) -; CHECK-NEXT: [[R:%.*]] = or i32 [[M]], [[X]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i32 [[M]], [[X]] ; CHECK-NEXT: ret i32 [[R]] ; %x = and i32 %p1, 7 @@ -1537,7 +1537,7 @@ define i32 @mul_no_common_bits_const_op_uses(i32 %p) { ; CHECK-NEXT: [[X:%.*]] = and i32 [[P:%.*]], 7 ; CHECK-NEXT: [[M:%.*]] = mul nuw nsw i32 [[X]], 24 ; CHECK-NEXT: call void @use(i32 [[M]]) -; CHECK-NEXT: [[R:%.*]] = or i32 [[M]], [[X]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i32 [[M]], [[X]] ; CHECK-NEXT: ret i32 [[R]] ; %x = and i32 %p, 7 diff --git a/llvm/test/Transforms/InstCombine/pr32686.ll b/llvm/test/Transforms/InstCombine/pr32686.ll index 39f9803683c33..51065aa51f954 100644 --- a/llvm/test/Transforms/InstCombine/pr32686.ll +++ b/llvm/test/Transforms/InstCombine/pr32686.ll @@ -11,7 +11,7 @@ define void @tinkywinky() { ; CHECK-NEXT: [[LNOT_EXT:%.*]] = zext i1 [[TOBOOL_NOT]] to i32 ; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 icmp ne (ptr @a, ptr @b) to i32 ; CHECK-NEXT: [[XOR1:%.*]] = or i32 [[ZEXT]], [[LNOT_EXT]] 
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR1]], 2 +; CHECK-NEXT: [[OR:%.*]] = or disjoint i32 [[XOR1]], 2 ; CHECK-NEXT: store i32 [[OR]], ptr @b, align 4 ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll index 91ef90db72bd1..fa8443d4c9578 100644 --- a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll +++ b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll @@ -156,7 +156,7 @@ define i64 @select_clz_to_ctz_i64_wrong_xor(i64 %a) { ; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[A:%.*]] ; CHECK-NEXT: [[AND:%.*]] = and i64 [[SUB]], [[A]] ; CHECK-NEXT: [[LZ:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[AND]], i1 true), !range [[RNG1]] -; CHECK-NEXT: [[SUB11:%.*]] = or i64 [[LZ]], 64 +; CHECK-NEXT: [[SUB11:%.*]] = or disjoint i64 [[LZ]], 64 ; CHECK-NEXT: ret i64 [[SUB11]] ; %sub = sub i64 0, %a diff --git a/llvm/test/Transforms/InstCombine/select-icmp-and.ll b/llvm/test/Transforms/InstCombine/select-icmp-and.ll index 283c01b3aacd5..e1799d1091dac 100644 --- a/llvm/test/Transforms/InstCombine/select-icmp-and.ll +++ b/llvm/test/Transforms/InstCombine/select-icmp-and.ll @@ -455,7 +455,7 @@ define i32 @clear_to_clear(i32 %x) { ; CHECK-LABEL: @clear_to_clear( ; CHECK-NEXT: [[T1:%.*]] = and i32 [[X:%.*]], 8 ; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[T1]], 0 -; CHECK-NEXT: [[T3:%.*]] = or i32 [[T1]], -11 +; CHECK-NEXT: [[T3:%.*]] = or disjoint i32 [[T1]], -11 ; CHECK-NEXT: call void @use1(i1 [[T2]]) ; CHECK-NEXT: ret i32 [[T3]] ; @@ -473,7 +473,7 @@ define i32 @set_to_set(i32 %x) { ; CHECK-LABEL: @set_to_set( ; CHECK-NEXT: [[T1:%.*]] = and i32 [[X:%.*]], 8 ; CHECK-NEXT: [[T2:%.*]] = icmp ne i32 [[T1]], 0 -; CHECK-NEXT: [[T3:%.*]] = or i32 [[T1]], -11 +; CHECK-NEXT: [[T3:%.*]] = or disjoint i32 [[T1]], -11 ; CHECK-NEXT: call void @use1(i1 [[T2]]) ; CHECK-NEXT: ret i32 [[T3]] ; @@ -520,7 +520,7 @@ define i8 @clear_to_set_decomposebittest(i8 %x) { define i8 @clear_to_clear_decomposebittest(i8 %x) { ; CHECK-LABEL: @clear_to_clear_decomposebittest( ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], -128 -; CHECK-NEXT: [[T3:%.*]] = or i8 [[TMP1]], 3 +; CHECK-NEXT: [[T3:%.*]] = or disjoint i8 [[TMP1]], 3 ; CHECK-NEXT: ret i8 [[T3]] ; %t2 = icmp sgt i8 %x, -1 @@ -533,7 +533,7 @@ define i8 @clear_to_clear_decomposebittest(i8 %x) { define i8 @set_to_set_decomposebittest(i8 %x) { ; CHECK-LABEL: @set_to_set_decomposebittest( ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], -128 -; CHECK-NEXT: [[T3:%.*]] = or i8 [[TMP1]], 3 +; CHECK-NEXT: [[T3:%.*]] = or disjoint i8 [[TMP1]], 3 ; CHECK-NEXT: ret i8 [[T3]] ; %t2 = icmp slt i8 %x, 0 diff --git a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll index c4a5d9bc5bf79..71d898a3ed9bd 100644 --- a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll +++ b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll @@ -1463,7 +1463,7 @@ define i8 @set_bits(i8 %x, i1 %b) { ; CHECK-LABEL: @set_bits( ; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], -6 ; CHECK-NEXT: [[MASKSEL:%.*]] = select i1 [[B:%.*]], i8 5, i8 0 -; CHECK-NEXT: [[COND:%.*]] = or i8 [[AND]], [[MASKSEL]] +; CHECK-NEXT: [[COND:%.*]] = or disjoint i8 [[AND]], [[MASKSEL]] ; CHECK-NEXT: ret i8 [[COND]] ; %and = and i8 %x, 250 @@ -1492,7 +1492,7 @@ define i8 @set_bits_extra_use1(i8 %x, i1 %b) { ; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], -6 ; CHECK-NEXT: call void @use(i8 [[AND]]) ; CHECK-NEXT: [[MASKSEL:%.*]] = select i1 [[B:%.*]], i8 5, i8 0 -; 
CHECK-NEXT: [[COND:%.*]] = or i8 [[AND]], [[MASKSEL]] +; CHECK-NEXT: [[COND:%.*]] = or disjoint i8 [[AND]], [[MASKSEL]] ; CHECK-NEXT: ret i8 [[COND]] ; %and = and i8 %x, 250 @@ -1523,7 +1523,7 @@ define <2 x i8> @clear_bits(<2 x i8> %x, <2 x i1> %b) { ; CHECK-LABEL: @clear_bits( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[MASKSEL:%.*]] = select <2 x i1> [[B:%.*]], <2 x i8> zeroinitializer, <2 x i8> -; CHECK-NEXT: [[COND:%.*]] = or <2 x i8> [[AND]], [[MASKSEL]] +; CHECK-NEXT: [[COND:%.*]] = or disjoint <2 x i8> [[AND]], [[MASKSEL]] ; CHECK-NEXT: ret <2 x i8> [[COND]] ; %and = and <2 x i8> %x, @@ -1552,7 +1552,7 @@ define <2 x i8> @clear_bits_extra_use1(<2 x i8> %x, i1 %b) { ; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[X:%.*]], ; CHECK-NEXT: call void @use_vec(<2 x i8> [[AND]]) ; CHECK-NEXT: [[MASKSEL:%.*]] = select i1 [[B:%.*]], <2 x i8> zeroinitializer, <2 x i8> -; CHECK-NEXT: [[COND:%.*]] = or <2 x i8> [[AND]], [[MASKSEL]] +; CHECK-NEXT: [[COND:%.*]] = or disjoint <2 x i8> [[AND]], [[MASKSEL]] ; CHECK-NEXT: ret <2 x i8> [[COND]] ; %and = and <2 x i8> %x, diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll index 6f24758effac2..7583a75385a76 100644 --- a/llvm/test/Transforms/InstCombine/select.ll +++ b/llvm/test/Transforms/InstCombine/select.ll @@ -3612,7 +3612,7 @@ define i32 @pr62088() { ; CHECK: loop: ; CHECK-NEXT: [[NOT2:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ -2, [[LOOP]] ] ; CHECK-NEXT: [[H_0:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ 1, [[LOOP]] ] -; CHECK-NEXT: [[XOR1:%.*]] = or i32 [[H_0]], [[NOT2]] +; CHECK-NEXT: [[XOR1:%.*]] = or disjoint i32 [[H_0]], [[NOT2]] ; CHECK-NEXT: [[SUB5:%.*]] = sub i32 -1824888657, [[XOR1]] ; CHECK-NEXT: [[XOR6:%.*]] = xor i32 [[SUB5]], -1260914025 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[XOR6]], 824855120 diff --git a/llvm/test/Transforms/InstCombine/shift-shift.ll b/llvm/test/Transforms/InstCombine/shift-shift.ll index 4270a2c2ba205..8a40863300d45 100644 --- a/llvm/test/Transforms/InstCombine/shift-shift.ll +++ b/llvm/test/Transforms/InstCombine/shift-shift.ll @@ -458,7 +458,7 @@ define i8 @shl_lshr_demand3(i8 %x) { ; CHECK-LABEL: @shl_lshr_demand3( ; CHECK-NEXT: [[SHL:%.*]] = shl i8 40, [[X:%.*]] ; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i8 [[SHL]], 3 -; CHECK-NEXT: [[R:%.*]] = or i8 [[LSHR]], -64 +; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[LSHR]], -64 ; CHECK-NEXT: ret i8 [[R]] ; %shl = shl i8 40, %x ; 0b0010_1000 @@ -473,7 +473,7 @@ define i8 @shl_lshr_demand4(i8 %x) { ; CHECK-LABEL: @shl_lshr_demand4( ; CHECK-NEXT: [[SHL:%.*]] = shl i8 44, [[X:%.*]] ; CHECK-NEXT: [[LSHR:%.*]] = lshr i8 [[SHL]], 3 -; CHECK-NEXT: [[R:%.*]] = or i8 [[LSHR]], -32 +; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[LSHR]], -32 ; CHECK-NEXT: ret i8 [[R]] ; %shl = shl i8 44, %x ; 0b0010_1100 @@ -621,7 +621,7 @@ define i8 @lshr_shl_demand3(i8 %x) { ; CHECK-LABEL: @lshr_shl_demand3( ; CHECK-NEXT: [[SHR:%.*]] = lshr i8 28, [[X:%.*]] ; CHECK-NEXT: [[SHL:%.*]] = shl nuw i8 [[SHR]], 3 -; CHECK-NEXT: [[R:%.*]] = or i8 [[SHL]], 3 +; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SHL]], 3 ; CHECK-NEXT: ret i8 [[R]] ; %shr = lshr i8 28, %x ; 0b0001_1100 @@ -636,7 +636,7 @@ define i8 @lshr_shl_demand4(i8 %x) { ; CHECK-LABEL: @lshr_shl_demand4( ; CHECK-NEXT: [[SHR:%.*]] = lshr i8 60, [[X:%.*]] ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[SHR]], 3 -; CHECK-NEXT: [[R:%.*]] = or i8 [[SHL]], 7 +; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SHL]], 7 ; CHECK-NEXT: ret i8 [[R]] ; %shr = lshr i8 60, %x ; 0b0011_1100 diff --git 
a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll index bad6a995cb75d..54ff690eb85bc 100644 --- a/llvm/test/Transforms/InstCombine/shift.ll +++ b/llvm/test/Transforms/InstCombine/shift.ll @@ -171,7 +171,7 @@ define i8 @test13a(i8 %A) { define i32 @test14(i32 %A) { ; CHECK-LABEL: @test14( ; CHECK-NEXT: [[B:%.*]] = and i32 [[A:%.*]], -19760 -; CHECK-NEXT: [[C:%.*]] = or i32 [[B]], 19744 +; CHECK-NEXT: [[C:%.*]] = or disjoint i32 [[B]], 19744 ; CHECK-NEXT: ret i32 [[C]] ; %B = lshr i32 %A, 4 @@ -660,7 +660,7 @@ define i8 @test39(i32 %a0) { ; CHECK-NEXT: [[I51:%.*]] = xor i8 [[I50]], [[I5]] ; CHECK-NEXT: [[TMP0:%.*]] = lshr exact i8 [[I5]], 3 ; CHECK-NEXT: [[I54:%.*]] = and i8 [[TMP0]], 16 -; CHECK-NEXT: [[I551:%.*]] = or i8 [[I54]], [[I51]] +; CHECK-NEXT: [[I551:%.*]] = or disjoint i8 [[I54]], [[I51]] ; CHECK-NEXT: ret i8 [[I551]] ; entry: @@ -1041,7 +1041,7 @@ define i32 @test56(i32 %x) { ; CHECK-LABEL: @test56( ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 3 ; CHECK-NEXT: [[SHL:%.*]] = and i32 [[TMP1]], -16 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], 7 +; CHECK-NEXT: [[OR:%.*]] = or disjoint i32 [[SHL]], 7 ; CHECK-NEXT: ret i32 [[OR]] ; %shr2 = lshr i32 %x, 1 @@ -1054,7 +1054,7 @@ define i32 @test57(i32 %x) { ; CHECK-LABEL: @test57( ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 3 ; CHECK-NEXT: [[SHL:%.*]] = and i32 [[TMP1]], -16 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], 7 +; CHECK-NEXT: [[OR:%.*]] = or disjoint i32 [[SHL]], 7 ; CHECK-NEXT: ret i32 [[OR]] ; %shr = ashr i32 %x, 1 @@ -1091,7 +1091,7 @@ define i32 @test59(i32 %x) { ; CHECK-LABEL: @test59( ; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 [[X:%.*]], 3 ; CHECK-NEXT: [[SHL:%.*]] = and i32 [[TMP1]], -4 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], 2 +; CHECK-NEXT: [[OR:%.*]] = or disjoint i32 [[SHL]], 2 ; CHECK-NEXT: ret i32 [[OR]] ; %shr = ashr i32 %x, 4 @@ -1915,7 +1915,7 @@ define <2 x i32> @lshr_mul_negpow2_5(<2 x i32> %x) { ; CHECK-LABEL: @lshr_mul_negpow2_5( ; CHECK-NEXT: [[TMP1:%.*]] = sub <2 x i32> zeroinitializer, [[X:%.*]] ; CHECK-NEXT: [[A:%.*]] = and <2 x i32> [[TMP1]], -; CHECK-NEXT: [[B:%.*]] = or <2 x i32> [[A]], +; CHECK-NEXT: [[B:%.*]] = or disjoint <2 x i32> [[A]], ; CHECK-NEXT: ret <2 x i32> [[B]] ; %a = mul <2 x i32> %x, diff --git a/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll b/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll index d76564dd7a67e..fd2236860add3 100644 --- a/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll @@ -1020,7 +1020,7 @@ define i8 @negation_of_increment_via_or_with_no_common_bits_set(i8 %x, i8 %y) { define i8 @negation_of_increment_via_or_with_no_common_bits_set_extrause(i8 %x, i8 %y) { ; CHECK-LABEL: @negation_of_increment_via_or_with_no_common_bits_set_extrause( ; CHECK-NEXT: [[T0:%.*]] = shl i8 [[Y:%.*]], 1 -; CHECK-NEXT: [[T1:%.*]] = or i8 [[T0]], 1 +; CHECK-NEXT: [[T1:%.*]] = or disjoint i8 [[T0]], 1 ; CHECK-NEXT: call void @use8(i8 [[T1]]) ; CHECK-NEXT: [[T2:%.*]] = sub i8 [[X:%.*]], [[T1]] ; CHECK-NEXT: ret i8 [[T2]] @@ -1082,7 +1082,7 @@ define i8 @add_via_or_with_no_common_bits_set_extrause(i8 %x, i8 %y) { ; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = shl i8 [[T0]], 2 -; CHECK-NEXT: [[T2:%.*]] = or i8 [[T1]], 3 +; CHECK-NEXT: [[T2:%.*]] = or disjoint i8 [[T1]], 3 ; CHECK-NEXT: call void @use8(i8 [[T2]]) ; CHECK-NEXT: [[T3:%.*]] = sub i8 [[X:%.*]], [[T2]] ; 
CHECK-NEXT: ret i8 [[T3]] diff --git a/llvm/test/Transforms/InstCombine/sub-of-negatible.ll b/llvm/test/Transforms/InstCombine/sub-of-negatible.ll index aacc83eba0061..64cb647ae3f74 100644 --- a/llvm/test/Transforms/InstCombine/sub-of-negatible.ll +++ b/llvm/test/Transforms/InstCombine/sub-of-negatible.ll @@ -1044,7 +1044,7 @@ define i8 @negation_of_increment_via_or_with_no_common_bits_set(i8 %x, i8 %y) { define i8 @negation_of_increment_via_or_with_no_common_bits_set_extrause(i8 %x, i8 %y) { ; CHECK-LABEL: @negation_of_increment_via_or_with_no_common_bits_set_extrause( ; CHECK-NEXT: [[T0:%.*]] = shl i8 [[Y:%.*]], 1 -; CHECK-NEXT: [[T1:%.*]] = or i8 [[T0]], 1 +; CHECK-NEXT: [[T1:%.*]] = or disjoint i8 [[T0]], 1 ; CHECK-NEXT: call void @use8(i8 [[T1]]) ; CHECK-NEXT: [[T2:%.*]] = sub i8 [[X:%.*]], [[T1]] ; CHECK-NEXT: ret i8 [[T2]] @@ -1106,7 +1106,7 @@ define i8 @add_via_or_with_no_common_bits_set_extrause(i8 %x, i8 %y) { ; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = shl i8 [[T0]], 2 -; CHECK-NEXT: [[T2:%.*]] = or i8 [[T1]], 3 +; CHECK-NEXT: [[T2:%.*]] = or disjoint i8 [[T1]], 3 ; CHECK-NEXT: call void @use8(i8 [[T2]]) ; CHECK-NEXT: [[T3:%.*]] = sub i8 [[X:%.*]], [[T2]] ; CHECK-NEXT: ret i8 [[T3]] diff --git a/llvm/test/Transforms/InstCombine/trunc-demand.ll b/llvm/test/Transforms/InstCombine/trunc-demand.ll index b20b3b49e9046..4f6e79285eaa8 100644 --- a/llvm/test/Transforms/InstCombine/trunc-demand.ll +++ b/llvm/test/Transforms/InstCombine/trunc-demand.ll @@ -130,7 +130,7 @@ define i6 @or_trunc_lshr(i8 %x) { ; CHECK-LABEL: @or_trunc_lshr( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[X:%.*]] to i6 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i6 [[TMP1]], 1 -; CHECK-NEXT: [[R:%.*]] = or i6 [[TMP2]], -32 +; CHECK-NEXT: [[R:%.*]] = or disjoint i6 [[TMP2]], -32 ; CHECK-NEXT: ret i6 [[R]] ; %s = lshr i8 %x, 1 @@ -143,7 +143,7 @@ define i6 @or_trunc_lshr_more(i8 %x) { ; CHECK-LABEL: @or_trunc_lshr_more( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[X:%.*]] to i6 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i6 [[TMP1]], 4 -; CHECK-NEXT: [[R:%.*]] = or i6 [[TMP2]], -4 +; CHECK-NEXT: [[R:%.*]] = or disjoint i6 [[TMP2]], -4 ; CHECK-NEXT: ret i6 [[R]] ; %s = lshr i8 %x, 4 diff --git a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll index d4898679b033c..87c90bb91f39e 100644 --- a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll @@ -257,7 +257,7 @@ define i64 @test8(i32 %A, i32 %B) { ; CHECK-NEXT: [[C:%.*]] = zext i32 [[A:%.*]] to i64 ; CHECK-NEXT: [[D:%.*]] = zext i32 [[B:%.*]] to i64 ; CHECK-NEXT: [[E:%.*]] = shl nuw i64 [[D]], 32 -; CHECK-NEXT: [[F:%.*]] = or i64 [[E]], [[C]] +; CHECK-NEXT: [[F:%.*]] = or disjoint i64 [[E]], [[C]] ; CHECK-NEXT: ret i64 [[F]] ; %C = zext i32 %A to i128 @@ -273,7 +273,7 @@ define <2 x i64> @test8_vec(<2 x i32> %A, <2 x i32> %B) { ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> ; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64> ; CHECK-NEXT: [[E:%.*]] = shl nuw <2 x i64> [[D]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i64> [[E]], [[C]] +; CHECK-NEXT: [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]] ; CHECK-NEXT: ret <2 x i64> [[F]] ; %C = zext <2 x i32> %A to <2 x i128> @@ -289,7 +289,7 @@ define <2 x i64> @test8_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> ; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64> ; 
CHECK-NEXT: [[E:%.*]] = shl <2 x i64> [[D]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i64> [[E]], [[C]] +; CHECK-NEXT: [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]] ; CHECK-NEXT: ret <2 x i64> [[F]] ; %C = zext <2 x i32> %A to <2 x i128> diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll index 7c5bc2e68edc2..adcad30036818 100644 --- a/llvm/test/Transforms/InstCombine/trunc.ll +++ b/llvm/test/Transforms/InstCombine/trunc.ll @@ -257,7 +257,7 @@ define i64 @test8(i32 %A, i32 %B) { ; CHECK-NEXT: [[C:%.*]] = zext i32 [[A:%.*]] to i64 ; CHECK-NEXT: [[D:%.*]] = zext i32 [[B:%.*]] to i64 ; CHECK-NEXT: [[E:%.*]] = shl nuw i64 [[D]], 32 -; CHECK-NEXT: [[F:%.*]] = or i64 [[E]], [[C]] +; CHECK-NEXT: [[F:%.*]] = or disjoint i64 [[E]], [[C]] ; CHECK-NEXT: ret i64 [[F]] ; %C = zext i32 %A to i128 @@ -273,7 +273,7 @@ define <2 x i64> @test8_vec(<2 x i32> %A, <2 x i32> %B) { ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> ; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64> ; CHECK-NEXT: [[E:%.*]] = shl nuw <2 x i64> [[D]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i64> [[E]], [[C]] +; CHECK-NEXT: [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]] ; CHECK-NEXT: ret <2 x i64> [[F]] ; %C = zext <2 x i32> %A to <2 x i128> @@ -289,7 +289,7 @@ define <2 x i64> @test8_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) { ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64> ; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64> ; CHECK-NEXT: [[E:%.*]] = shl <2 x i64> [[D]], -; CHECK-NEXT: [[F:%.*]] = or <2 x i64> [[E]], [[C]] +; CHECK-NEXT: [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]] ; CHECK-NEXT: ret <2 x i64> [[F]] ; %C = zext <2 x i32> %A to <2 x i128> diff --git a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll index b2309373bec30..b5c85690a6b02 100644 --- a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll +++ b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll @@ -10,7 +10,7 @@ define i4 @scalar0 (i4 %x, i4 %y) { ; CHECK-LABEL: @scalar0( ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X:%.*]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[Y:%.*]], -2 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %n0 = xor i4 %x, %y @@ -23,7 +23,7 @@ define i4 @scalar1 (i4 %x, i4 %y) { ; CHECK-LABEL: @scalar1( ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X:%.*]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[Y:%.*]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %n0 = xor i4 %x, %y @@ -91,7 +91,7 @@ define i4 @c_1_0_0 (i4 %x, i4 %y) { ; CHECK-LABEL: @c_1_0_0( ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X:%.*]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[Y:%.*]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %n0 = xor i4 %y, %x ; swapped order @@ -104,7 +104,7 @@ define i4 @c_0_1_0 (i4 %x, i4 %y) { ; CHECK-LABEL: @c_0_1_0( ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[Y:%.*]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[X:%.*]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %n0 = xor i4 %x, %y @@ -119,7 +119,7 @@ define i4 @c_0_0_1 () { ; CHECK-NEXT: 
[[Y:%.*]] = call i4 @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[Y]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %x = call i4 @gen4() @@ -134,7 +134,7 @@ define i4 @c_1_1_0 (i4 %x, i4 %y) { ; CHECK-LABEL: @c_1_1_0( ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[Y:%.*]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[X:%.*]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %n0 = xor i4 %y, %x ; swapped order @@ -148,7 +148,7 @@ define i4 @c_1_0_1 (i4 %x) { ; CHECK-NEXT: [[Y:%.*]] = call i4 @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X:%.*]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[Y]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %y = call i4 @gen4() @@ -163,7 +163,7 @@ define i4 @c_0_1_1 (i4 %y) { ; CHECK-NEXT: [[X:%.*]] = call i4 @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[Y:%.*]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[X]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %x = call i4 @gen4() @@ -179,7 +179,7 @@ define i4 @c_1_1_1 () { ; CHECK-NEXT: [[Y:%.*]] = call i4 @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[Y]], -2 ; CHECK-NEXT: [[TMP2:%.*]] = and i4 [[X]], 1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint i4 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i4 [[R]] ; %x = call i4 @gen4() diff --git a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll index da8803739c94d..35de89a461fa0 100644 --- a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll +++ b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll @@ -10,7 +10,7 @@ define <2 x i4> @splat (<2 x i4> %x, <2 x i4> %y) { ; CHECK-LABEL: @splat( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[Y:%.*]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %n0 = xor <2 x i4> %x, %y @@ -74,7 +74,7 @@ define <2 x i4> @in_constant_varx_14(<2 x i4> %x, <2 x i4> %mask) { define <2 x i4> @in_constant_varx_14_nonsplat(<2 x i4> %x, <2 x i4> %mask) { ; CHECK-LABEL: @in_constant_varx_14_nonsplat( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], ; CHECK-NEXT: ret <2 x i4> [[R]] ; %n0 = xor <2 x i4> %x, ; %x @@ -120,7 +120,7 @@ define <2 x i4> @in_constant_14_vary(<2 x i4> %y, <2 x i4> %mask) { define <2 x i4> @in_constant_14_vary_nonsplat(<2 x i4> %y, <2 x i4> %mask) { ; CHECK-LABEL: @in_constant_14_vary_nonsplat( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y:%.*]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], ; CHECK-NEXT: ret <2 x i4> [[R]] ; %n0 = xor <2 x i4> %y, ; %x @@ -152,7 +152,7 @@ define <2 x i4> @c_1_0_0 (<2 x i4> %x, <2 x i4> %y) { ; CHECK-LABEL: @c_1_0_0( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[Y:%.*]], -; CHECK-NEXT: [[R:%.*]] = or <2 x 
i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %n0 = xor <2 x i4> %y, %x ; swapped order @@ -165,7 +165,7 @@ define <2 x i4> @c_0_1_0 (<2 x i4> %x, <2 x i4> %y) { ; CHECK-LABEL: @c_0_1_0( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %n0 = xor <2 x i4> %x, %y @@ -180,7 +180,7 @@ define <2 x i4> @c_0_0_1 () { ; CHECK-NEXT: [[Y:%.*]] = call <2 x i4> @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[X]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[Y]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %x = call <2 x i4> @gen4() @@ -195,7 +195,7 @@ define <2 x i4> @c_1_1_0 (<2 x i4> %x, <2 x i4> %y) { ; CHECK-LABEL: @c_1_1_0( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[X:%.*]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %n0 = xor <2 x i4> %y, %x ; swapped order @@ -209,7 +209,7 @@ define <2 x i4> @c_1_0_1 (<2 x i4> %x) { ; CHECK-NEXT: [[Y:%.*]] = call <2 x i4> @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[X:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[Y]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %y = call <2 x i4> @gen4() @@ -224,7 +224,7 @@ define <2 x i4> @c_0_1_1 (<2 x i4> %y) { ; CHECK-NEXT: [[X:%.*]] = call <2 x i4> @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y:%.*]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[X]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %x = call <2 x i4> @gen4() @@ -240,7 +240,7 @@ define <2 x i4> @c_1_1_1 () { ; CHECK-NEXT: [[Y:%.*]] = call <2 x i4> @gen4() ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y]], ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i4> [[X]], -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i4> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %x = call <2 x i4> @gen4() diff --git a/llvm/test/Transforms/InstCombine/xor.ll b/llvm/test/Transforms/InstCombine/xor.ll index d4b49afe9efe6..7c61401fca07c 100644 --- a/llvm/test/Transforms/InstCombine/xor.ll +++ b/llvm/test/Transforms/InstCombine/xor.ll @@ -72,7 +72,7 @@ define i32 @test7(i32 %A, i32 %B) { ; CHECK-LABEL: @test7( ; CHECK-NEXT: [[A1:%.*]] = and i32 [[A:%.*]], 7 ; CHECK-NEXT: [[B1:%.*]] = and i32 [[B:%.*]], 128 -; CHECK-NEXT: [[C11:%.*]] = or i32 [[A1]], [[B1]] +; CHECK-NEXT: [[C11:%.*]] = or disjoint i32 [[A1]], [[B1]] ; CHECK-NEXT: ret i32 [[C11]] ; %A1 = and i32 %A, 7 @@ -122,7 +122,7 @@ define <2 x i1> @test9vec(<2 x i8> %a) { define i8 @test10(i8 %A) { ; CHECK-LABEL: @test10( ; CHECK-NEXT: [[B:%.*]] = and i8 [[A:%.*]], 3 -; CHECK-NEXT: [[C1:%.*]] = or i8 [[B]], 4 +; CHECK-NEXT: [[C1:%.*]] = or disjoint i8 [[B]], 4 ; CHECK-NEXT: ret i8 [[C1]] ; %B = and i8 %A, 3 @@ -133,7 +133,7 @@ define i8 @test10(i8 %A) { define i8 @test11(i8 %A) { ; CHECK-LABEL: @test11( ; CHECK-NEXT: [[B:%.*]] = and i8 [[A:%.*]], -13 -; 
CHECK-NEXT: [[C:%.*]] = or i8 [[B]], 8 +; CHECK-NEXT: [[C:%.*]] = or disjoint i8 [[B]], 8 ; CHECK-NEXT: ret i8 [[C]] ; %B = or i8 %A, 12 diff --git a/llvm/test/Transforms/InstCombine/xor2.ll b/llvm/test/Transforms/InstCombine/xor2.ll index 06fd740954b99..7d12a00a8bd51 100644 --- a/llvm/test/Transforms/InstCombine/xor2.ll +++ b/llvm/test/Transforms/InstCombine/xor2.ll @@ -36,7 +36,7 @@ define i1 @test1(i32 %A) { define i32 @test2(i32 %t1) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: [[OVM:%.*]] = and i32 [[T1:%.*]], 32 -; CHECK-NEXT: [[OV1101:%.*]] = or i32 [[OVM]], 8 +; CHECK-NEXT: [[OV1101:%.*]] = or disjoint i32 [[OVM]], 8 ; CHECK-NEXT: ret i32 [[OV1101]] ; %ovm = and i32 %t1, 32 @@ -48,7 +48,7 @@ define i32 @test2(i32 %t1) { define i32 @test3(i32 %t1) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: [[OVM:%.*]] = and i32 [[T1:%.*]], 32 -; CHECK-NEXT: [[OV1101:%.*]] = or i32 [[OVM]], 8 +; CHECK-NEXT: [[OV1101:%.*]] = or disjoint i32 [[OVM]], 8 ; CHECK-NEXT: ret i32 [[OV1101]] ; %ovm = or i32 %t1, 145 diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll index 9ec3ddc80c57f..585f099fd41b1 100644 --- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll +++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll @@ -174,7 +174,7 @@ define i8 @PR49475_infloop(i32 %t0, i16 %insert, i64 %e, i8 %i162) { ; CHECK-NEXT: [[B2:%.*]] = icmp eq i16 [[INSERT:%.*]], 0 ; CHECK-NEXT: [[T1:%.*]] = or i1 [[B]], [[B2]] ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[T0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], 140 +; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i32 [[TMP1]], 140 ; CHECK-NEXT: [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64 ; CHECK-NEXT: [[XOR1:%.*]] = select i1 [[T1]], i64 [[TMP3]], i64 140 ; CHECK-NEXT: [[CONV16:%.*]] = sext i8 [[I162:%.*]] to i64 @@ -228,7 +228,7 @@ define i1 @PR51762(ptr %i, i32 %t0, i16 %t1, ptr %p, ptr %d, ptr %f, i32 %p2, i1 ; CHECK-NEXT: [[INSERT_EXT51:%.*]] = zext i32 [[I_SROA_8_0]] to i64 ; CHECK-NEXT: [[INSERT_SHIFT52:%.*]] = shl nuw i64 [[INSERT_EXT51]], 32 ; CHECK-NEXT: [[INSERT_EXT39:%.*]] = zext i32 [[SROA38]] to i64 -; CHECK-NEXT: [[INSERT_INSERT41:%.*]] = or i64 [[INSERT_SHIFT52]], [[INSERT_EXT39]] +; CHECK-NEXT: [[INSERT_INSERT41:%.*]] = or disjoint i64 [[INSERT_SHIFT52]], [[INSERT_EXT39]] ; CHECK-NEXT: [[REM:%.*]] = urem i64 [[S1]], [[INSERT_INSERT41]] ; CHECK-NEXT: [[NE:%.*]] = icmp ne i64 [[REM]], 0 ; CHECK-NEXT: [[LOR_EXT:%.*]] = zext i1 [[NE]] to i32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll index da358c6f4d453..27b0c95873514 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll @@ -39,7 +39,7 @@ define void @test_array_load2_store2(i32 %C, i32 %D) #1 { ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { , } @llvm.experimental.vector.deinterleave2.nxv8i32( [[WIDE_VEC]]) ; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { , } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[STRIDED_VEC]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[OFFSET_IDX]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw [[TMP1]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP5:%.*]] = mul nsw [[TMP2]], [[BROADCAST_SPLAT2]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[TMP3]] @@ -718,7 +718,7 @@ define void @mixed_load2_store2(i32* noalias nocapture 
readonly %A, i32* noalias ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { , } @llvm.experimental.vector.deinterleave2.nxv8i32( [[WIDE_VEC]]) ; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { , } [[STRIDED_VEC]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[STRIDED_VEC]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[OFFSET_IDX]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = mul nsw [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = call { , } @llvm.experimental.vector.deinterleave2.nxv8i32( [[WIDE_VEC]]) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[STRIDED_VEC2]], 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll index 4803b96642afd..ae3abba27be5f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll @@ -55,7 +55,7 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no ; SCALAR_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { , } @llvm.experimental.vector.deinterleave2.nxv32i8( [[WIDE_MASKED_VEC]]) ; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { , } [[STRIDED_VEC]], 0 ; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { , } [[STRIDED_VEC]], 1 -; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or i32 [[TMP8]], 1 +; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or disjoint i32 [[TMP8]], 1 ; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = call @llvm.smax.nxv16i8( [[TMP11]], [[TMP12]]) ; SCALAR_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = sub zeroinitializer, [[TMP14]] ; SCALAR_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sext i32 [[TMP13]] to i64 @@ -85,7 +85,7 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no ; SCALAR_TAIL_FOLDING-NEXT: [[TMP22:%.*]] = zext nneg i32 [[MUL]] to i64 ; SCALAR_TAIL_FOLDING-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP22]] ; SCALAR_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; SCALAR_TAIL_FOLDING-NEXT: [[ADD:%.*]] = or i32 [[MUL]], 1 +; SCALAR_TAIL_FOLDING-NEXT: [[ADD:%.*]] = or disjoint i32 [[MUL]], 1 ; SCALAR_TAIL_FOLDING-NEXT: [[TMP24:%.*]] = zext nneg i32 [[ADD]] to i64 ; SCALAR_TAIL_FOLDING-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP24]] ; SCALAR_TAIL_FOLDING-NEXT: [[TMP25:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1 @@ -137,7 +137,7 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[STRIDED_VEC:%.*]] = call { , } @llvm.experimental.vector.deinterleave2.nxv32i8( [[WIDE_MASKED_VEC]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = extractvalue { , } [[STRIDED_VEC]], 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = extractvalue { , } [[STRIDED_VEC]], 1 -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or i32 [[TMP7]], 1 +; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = or disjoint i32 [[TMP7]], 1 ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = call @llvm.smax.nxv16i8( [[TMP11]], [[TMP12]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = sub zeroinitializer, [[TMP14]] ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = sext i32 [[TMP13]] to i64 @@ -243,7 +243,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no ; SCALAR_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP8]] ; SCALAR_TAIL_FOLDING-NEXT: call void 
@llvm.masked.scatter.nxv16i8.nxv16p0( shufflevector ( insertelement ( poison, i8 1, i64 0), poison, zeroinitializer), [[TMP9]], i32 1, shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) ; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = icmp ugt [[VEC_IND]], [[BROADCAST_SPLAT]] -; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = or [[TMP7]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) +; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = or disjoint [[TMP7]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) ; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = zext nneg [[TMP11]] to ; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP12]] ; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0( shufflevector ( insertelement ( poison, i8 2, i64 0), poison, zeroinitializer), [[TMP13]], i32 1, [[TMP10]]) @@ -268,7 +268,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no ; SCALAR_TAIL_FOLDING-NEXT: [[CMP1:%.*]] = icmp ugt i32 [[IX_012]], [[CONV]] ; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] ; SCALAR_TAIL_FOLDING: if.then: -; SCALAR_TAIL_FOLDING-NEXT: [[ADD:%.*]] = or i32 [[MUL]], 1 +; SCALAR_TAIL_FOLDING-NEXT: [[ADD:%.*]] = or disjoint i32 [[MUL]], 1 ; SCALAR_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = zext nneg i32 [[ADD]] to i64 ; SCALAR_TAIL_FOLDING-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP18]] ; SCALAR_TAIL_FOLDING-NEXT: store i8 2, ptr [[ARRAYIDX3]], align 1 @@ -307,7 +307,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP7]] ; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0( shufflevector ( insertelement ( poison, i8 1, i64 0), poison, zeroinitializer), [[TMP8]], i32 1, [[ACTIVE_LANE_MASK]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP9:%.*]] = icmp ugt [[VEC_IND]], [[BROADCAST_SPLAT]] -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = or [[TMP6]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) +; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = or disjoint [[TMP6]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = zext nneg [[TMP10]] to ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP11]] ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], zeroinitializer @@ -408,7 +408,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no ; SCALAR_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP9]] ; SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0( shufflevector ( insertelement ( poison, i8 1, i64 0), poison, zeroinitializer), [[TMP10]], i32 1, [[TMP8]]) ; SCALAR_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt [[VEC_IND]], [[BROADCAST_SPLAT2]] -; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = or [[TMP7]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) +; SCALAR_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = or disjoint [[TMP7]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) ; SCALAR_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = zext nneg [[TMP12]] to ; SCALAR_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP13]] ; 
SCALAR_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0( shufflevector ( insertelement ( poison, i8 2, i64 0), poison, zeroinitializer), [[TMP14]], i32 1, [[TMP11]]) @@ -438,7 +438,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no ; SCALAR_TAIL_FOLDING-NEXT: [[CMP4:%.*]] = icmp ugt i32 [[IX_018]], [[CONV3]] ; SCALAR_TAIL_FOLDING-NEXT: br i1 [[CMP4]], label [[IF_THEN6:%.*]], label [[FOR_INC]] ; SCALAR_TAIL_FOLDING: if.then6: -; SCALAR_TAIL_FOLDING-NEXT: [[ADD:%.*]] = or i32 [[MUL]], 1 +; SCALAR_TAIL_FOLDING-NEXT: [[ADD:%.*]] = or disjoint i32 [[MUL]], 1 ; SCALAR_TAIL_FOLDING-NEXT: [[TMP19:%.*]] = zext nneg i32 [[ADD]] to i64 ; SCALAR_TAIL_FOLDING-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, ptr [[Q]], i64 [[TMP19]] ; SCALAR_TAIL_FOLDING-NEXT: store i8 2, ptr [[ARRAYIDX7]], align 1 @@ -482,7 +482,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP10:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP7]], zeroinitializer ; PREDICATED_TAIL_FOLDING-NEXT: call void @llvm.masked.scatter.nxv16i8.nxv16p0( shufflevector ( insertelement ( poison, i8 1, i64 0), poison, zeroinitializer), [[TMP9]], i32 1, [[TMP10]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP11:%.*]] = icmp ugt [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = or [[TMP6]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) +; PREDICATED_TAIL_FOLDING-NEXT: [[TMP12:%.*]] = or disjoint [[TMP6]], shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP13:%.*]] = zext nneg [[TMP12]] to ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], [[TMP13]] ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP15:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP11]], zeroinitializer diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll index da508ab5f7276..794c1364356c9 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll @@ -1336,7 +1336,7 @@ define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 { ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[OFFSET_IDX]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i32 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[ARR:%.*]], i32 [[TMP3]] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 -1 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4 @@ -1359,7 +1359,7 @@ define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 { ; CHECK: for.body: ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[RED_PHI:%.*]] = phi i32 [ [[RED_2:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADD:%.*]] = or i32 [[IV]], 1 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[IV]], 1 ; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i32 [[ADD]] ; CHECK-NEXT: [[L_0:%.*]] = load i32, ptr [[GEP_0]], align 4 ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i32 [[IV]] diff --git 
a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll index 86bb078ecf695..3d7d17dfbfb2a 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll @@ -16,7 +16,7 @@ define i32 @foo(ptr nocapture %A) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = or i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP1]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP2]] ; CHECK-NEXT: store i32 4, ptr [[TMP3]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll index 24d9e824f63a1..2467d3acbc6c7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll @@ -15,7 +15,7 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; SSE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1 ; SSE-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1 -; SSE-NEXT: [[TMP2:%.*]] = or i64 [[TMP1]], 8 +; SSE-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP1]], 8 ; SSE-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] ; SSE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP2]] ; SSE-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 @@ -51,11 +51,11 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; AVX1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; AVX1-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1 ; AVX1-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1 -; AVX1-NEXT: [[TMP2:%.*]] = or i64 [[TMP1]], 8 +; AVX1-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP1]], 8 ; AVX1-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 1 -; AVX1-NEXT: [[TMP4:%.*]] = or i64 [[TMP3]], 16 +; AVX1-NEXT: [[TMP4:%.*]] = or disjoint i64 [[TMP3]], 16 ; AVX1-NEXT: [[TMP5:%.*]] = shl i64 [[INDEX]], 1 -; AVX1-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 24 +; AVX1-NEXT: [[TMP6:%.*]] = or disjoint i64 [[TMP5]], 24 ; AVX1-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] ; AVX1-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP2]] ; AVX1-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP4]] @@ -105,11 +105,11 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; AVX2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; AVX2-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1 ; AVX2-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1 -; AVX2-NEXT: [[TMP2:%.*]] = or i64 [[TMP1]], 16 +; AVX2-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP1]], 16 ; AVX2-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 1 -; AVX2-NEXT: [[TMP4:%.*]] = or i64 [[TMP3]], 32 +; AVX2-NEXT: [[TMP4:%.*]] = or disjoint i64 [[TMP3]], 32 ; AVX2-NEXT: [[TMP5:%.*]] = shl i64 [[INDEX]], 1 -; AVX2-NEXT: [[TMP6:%.*]] = or i64 [[TMP5]], 48 +; AVX2-NEXT: [[TMP6:%.*]] = or disjoint i64 [[TMP5]], 48 ; AVX2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] ; 
AVX2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP2]] ; AVX2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP4]] @@ -160,7 +160,7 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; ATOM-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 1 ; ATOM-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] ; ATOM-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; ATOM-NEXT: [[TMP2:%.*]] = or i64 [[TMP0]], 1 +; ATOM-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP0]], 1 ; ATOM-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP2]] ; ATOM-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 ; ATOM-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP3]], [[TMP1]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll index fa17d9e3847da..23b22b7a7ebfd 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll @@ -293,10 +293,10 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q ; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]] ; CHECK: pred.store.if14: ; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 4 +; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[TMP9]], 4 ; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]] ; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP12:%.*]] = or i64 [[TMP11]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[TMP11]], 4 ; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[NEXT_GEP9]], align 16 ; CHECK-NEXT: store i32 [[TMP13]], ptr [[NEXT_GEP5]], align 16 @@ -306,10 +306,10 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q ; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17:%.*]] ; CHECK: pred.store.if16: ; CHECK-NEXT: [[TMP15:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP16:%.*]] = or i64 [[TMP15]], 8 +; CHECK-NEXT: [[TMP16:%.*]] = or disjoint i64 [[TMP15]], 8 ; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP16]] ; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 8 +; CHECK-NEXT: [[TMP18:%.*]] = or disjoint i64 [[TMP17]], 8 ; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]] ; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[NEXT_GEP10]], align 16 ; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP6]], align 16 @@ -319,10 +319,10 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q ; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF18:%.*]], label [[PRED_STORE_CONTINUE19]] ; CHECK: pred.store.if18: ; CHECK-NEXT: [[TMP21:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP22:%.*]] = or i64 [[TMP21]], 12 +; CHECK-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP21]], 12 ; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP22]] ; CHECK-NEXT: [[TMP23:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP24:%.*]] = or i64 [[TMP23]], 12 +; CHECK-NEXT: [[TMP24:%.*]] = or disjoint i64 [[TMP23]], 12 ; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP24]] ; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[NEXT_GEP11]], align 16 ; CHECK-NEXT: store i32 [[TMP25]], ptr 
[[NEXT_GEP7]], align 16
@@ -479,10 +479,10 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
; CHECK: pred.store.if11:
; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP9]], 4
+; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[TMP9]], 4
; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = or i64 [[TMP11]], 2
+; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[TMP11]], 2
; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[NEXT_GEP4]], align 2
; CHECK-NEXT: [[TMP14:%.*]] = zext i16 [[TMP13]] to i32
@@ -494,10 +494,10 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14:%.*]]
; CHECK: pred.store.if13:
; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP18:%.*]] = or i64 [[TMP17]], 8
+; CHECK-NEXT: [[TMP18:%.*]] = or disjoint i64 [[TMP17]], 8
; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP18]]
; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP20:%.*]] = or i64 [[TMP19]], 4
+; CHECK-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP19]], 4
; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP20]]
; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP5]], align 2
; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32
@@ -509,10 +509,10 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: br i1 [[TMP24]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16]]
; CHECK: pred.store.if15:
; CHECK-NEXT: [[TMP25:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP26:%.*]] = or i64 [[TMP25]], 12
+; CHECK-NEXT: [[TMP26:%.*]] = or disjoint i64 [[TMP25]], 12
; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP26]]
; CHECK-NEXT: [[TMP27:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP28:%.*]] = or i64 [[TMP27]], 6
+; CHECK-NEXT: [[TMP28:%.*]] = or disjoint i64 [[TMP27]], 6
; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP28]]
; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[NEXT_GEP6]], align 2
; CHECK-NEXT: [[TMP30:%.*]] = zext i16 [[TMP29]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
index 15959a4037e73..95b337944853c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll
@@ -1169,7 +1169,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readonly %p, ptr no
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_LOAD_CONTINUE14]]
; DISABLED_MASKED_STRIDED: pred.load.continue14:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP49:%.*]] = phi <8 x i8> [ [[TMP43]], [[PRED_LOAD_CONTINUE12]] ], [ [[TMP48]], [[PRED_LOAD_IF13]] ]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or <8 x i32> [[TMP1]],
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or disjoint <8 x i32> [[TMP1]],
; DISABLED_MASKED_STRIDED-NEXT: [[TMP51:%.*]] = extractelement <8 x i1> [[TMP0]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP51]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; DISABLED_MASKED_STRIDED: pred.load.if15:
@@ -1419,7 +1419,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readonly %p, ptr no
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP2]], i32 1, <16 x i1> [[INTERLEAVED_MASK]], <16 x i8> poison)
; ENABLED_MASKED_STRIDED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_VEC]], <16 x i8> poison, <8 x i32>
; ENABLED_MASKED_STRIDED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_VEC]], <16 x i8> poison, <8 x i32>
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = or i32 [[TMP1]], 1
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = or disjoint i32 [[TMP1]], 1
; ENABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = call <8 x i8> @llvm.smax.v8i8(<8 x i8> [[STRIDED_VEC]], <8 x i8> [[STRIDED_VEC1]])
; ENABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = sub <8 x i8> zeroinitializer, [[TMP4]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[Q:%.*]], i32 [[TMP3]]
@@ -1576,7 +1576,7 @@ define dso_local void @masked_strided2_reverse(ptr noalias nocapture readonly %p
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_LOAD_CONTINUE14]]
; DISABLED_MASKED_STRIDED: pred.load.continue14:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP49:%.*]] = phi <8 x i8> [ [[TMP43]], [[PRED_LOAD_CONTINUE12]] ], [ [[TMP48]], [[PRED_LOAD_IF13]] ]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or <8 x i32> [[TMP1]],
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or disjoint <8 x i32> [[TMP1]],
; DISABLED_MASKED_STRIDED-NEXT: [[TMP51:%.*]] = extractelement <8 x i1> [[TMP0]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP51]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; DISABLED_MASKED_STRIDED: pred.load.if15:
@@ -1901,7 +1901,7 @@ define dso_local void @masked_strided2_reverse(ptr noalias nocapture readonly %p
; ENABLED_MASKED_STRIDED-NEXT: br label [[PRED_LOAD_CONTINUE14]]
; ENABLED_MASKED_STRIDED: pred.load.continue14:
; ENABLED_MASKED_STRIDED-NEXT: [[TMP49:%.*]] = phi <8 x i8> [ [[TMP43]], [[PRED_LOAD_CONTINUE12]] ], [ [[TMP48]], [[PRED_LOAD_IF13]] ]
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or <8 x i32> [[TMP1]],
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or disjoint <8 x i32> [[TMP1]],
; ENABLED_MASKED_STRIDED-NEXT: [[TMP51:%.*]] = extractelement <8 x i1> [[TMP0]], i64 0
; ENABLED_MASKED_STRIDED-NEXT: br i1 [[TMP51]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; ENABLED_MASKED_STRIDED: pred.load.if15:
@@ -2296,7 +2296,7 @@ define dso_local void @masked_strided2_unknown_tc(ptr noalias nocapture readonly
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_LOAD_CONTINUE16]]
; DISABLED_MASKED_STRIDED: pred.load.continue16:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP51:%.*]] = phi <8 x i8> [ [[TMP45]], [[PRED_LOAD_CONTINUE14]] ], [ [[TMP50]], [[PRED_LOAD_IF15]] ]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP52:%.*]] = or <8 x i32> [[TMP2]],
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP52:%.*]] = or disjoint <8 x i32> [[TMP2]],
; DISABLED_MASKED_STRIDED-NEXT: [[TMP53:%.*]] = extractelement <8 x i1> [[TMP3]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP53]], label [[PRED_LOAD_IF17:%.*]], label [[PRED_LOAD_CONTINUE18:%.*]]
; DISABLED_MASKED_STRIDED: pred.load.if17:
@@ -2555,7 +2555,7 @@ define dso_local void @masked_strided2_unknown_tc(ptr noalias nocapture readonly
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP3]], i32 1, <16 x i1> [[INTERLEAVED_MASK]], <16 x i8> poison)
; ENABLED_MASKED_STRIDED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_VEC]], <16 x i8> poison, <8 x i32>
; ENABLED_MASKED_STRIDED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_VEC]], <16 x i8> poison, <8 x i32>
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = or i32 [[TMP2]], 1
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = or disjoint i32 [[TMP2]], 1
; ENABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.smax.v8i8(<8 x i8> [[STRIDED_VEC]], <8 x i8> [[STRIDED_VEC3]])
; ENABLED_MASKED_STRIDED-NEXT: [[TMP7:%.*]] = sub <8 x i8> zeroinitializer, [[TMP6]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[Q:%.*]], i32 [[TMP5]]
@@ -2732,7 +2732,7 @@ define dso_local void @unconditional_masked_strided2_unknown_tc(ptr noalias noca
; DISABLED_MASKED_STRIDED-NEXT: br label [[PRED_LOAD_CONTINUE14]]
; DISABLED_MASKED_STRIDED: pred.load.continue14:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP49:%.*]] = phi <8 x i8> [ [[TMP43]], [[PRED_LOAD_CONTINUE12]] ], [ [[TMP48]], [[PRED_LOAD_IF13]] ]
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or <8 x i32> [[TMP1]],
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP50:%.*]] = or disjoint <8 x i32> [[TMP1]],
; DISABLED_MASKED_STRIDED-NEXT: [[TMP51:%.*]] = extractelement <8 x i1> [[TMP0]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP51]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; DISABLED_MASKED_STRIDED: pred.load.if15:
@@ -2989,7 +2989,7 @@ define dso_local void @unconditional_masked_strided2_unknown_tc(ptr noalias noca
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP2]], i32 1, <16 x i1> [[INTERLEAVED_MASK]], <16 x i8> poison)
; ENABLED_MASKED_STRIDED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_VEC]], <16 x i8> poison, <8 x i32>
; ENABLED_MASKED_STRIDED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_VEC]], <16 x i8> poison, <8 x i32>
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = or i32 [[TMP1]], 1
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = or disjoint i32 [[TMP1]], 1
; ENABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = call <8 x i8> @llvm.smax.v8i8(<8 x i8> [[STRIDED_VEC]], <8 x i8> [[STRIDED_VEC3]])
; ENABLED_MASKED_STRIDED-NEXT: [[TMP5:%.*]] = sub <8 x i8> zeroinitializer, [[TMP4]]
; ENABLED_MASKED_STRIDED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[Q:%.*]], i32 [[TMP3]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
index ba32e5a1fe1a0..53eab9c649b42 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll
@@ -46,7 +46,7 @@ define dso_local void @test1(ptr noalias nocapture %points, ptr noalias nocaptur
; DISABLED_MASKED_STRIDED-NEXT: store i16 [[TMP13]], ptr [[TMP9]], align 2
; DISABLED_MASKED_STRIDED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i16>, ptr [[TMP14]], align 2
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP15:%.*]] = or <4 x i64> [[TMP1]],
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP15:%.*]] = or disjoint <4 x i64> [[TMP1]],
; DISABLED_MASKED_STRIDED-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP15]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[POINTS]], i64 [[TMP16]]
; DISABLED_MASKED_STRIDED-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP15]], i64 1
@@ -81,7 +81,7 @@ define dso_local void @test1(ptr noalias nocapture %points, ptr noalias nocaptur
; ENABLED_MASKED_STRIDED-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[INDEX]], 2
; ENABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], 1
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = or disjoint i64 [[TMP1]], 1
; ENABLED_MASKED_STRIDED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[TMP3]]
; ENABLED_MASKED_STRIDED-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i16> [[WIDE_LOAD]], <4 x i16> [[WIDE_LOAD1]], <16 x i32>
; ENABLED_MASKED_STRIDED-NEXT: call void @llvm.masked.store.v16i16.p0(<16 x i16> [[INTERLEAVED_VEC]], ptr [[GEP]], i32 2, <16 x i1> )
@@ -182,7 +182,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; DISABLED_MASKED_STRIDED: pred.store.continue6:
; DISABLED_MASKED_STRIDED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
; DISABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_LOAD7:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[TMP19]], i32 2, <4 x i1> [[TMP0]], <4 x i16> poison)
-; DISABLED_MASKED_STRIDED-NEXT: [[TMP20:%.*]] = or <4 x i64> [[TMP2]],
+; DISABLED_MASKED_STRIDED-NEXT: [[TMP20:%.*]] = or disjoint <4 x i64> [[TMP2]],
; DISABLED_MASKED_STRIDED-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP0]], i64 0
; DISABLED_MASKED_STRIDED-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
; DISABLED_MASKED_STRIDED: pred.store.if8:
@@ -252,7 +252,7 @@ define dso_local void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr
; ENABLED_MASKED_STRIDED-NEXT: [[TMP2:%.*]] = shl nsw i64 [[INDEX]], 2
; ENABLED_MASKED_STRIDED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[INDEX]]
; ENABLED_MASKED_STRIDED-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[TMP3]], i32 2, <4 x i1> [[TMP0]], <4 x i16> poison)
-; ENABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = or i64 [[TMP2]], 1
+; ENABLED_MASKED_STRIDED-NEXT: [[TMP4:%.*]] = or disjoint i64 [[TMP2]], 1
; ENABLED_MASKED_STRIDED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[TMP4]]
; ENABLED_MASKED_STRIDED-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i16> [[WIDE_MASKED_LOAD]], <4 x i16> [[WIDE_MASKED_LOAD3]], <16 x i32>
; ENABLED_MASKED_STRIDED-NEXT: [[INTERLEAVED_MASK:%.*]] = shufflevector <4 x i1> [[TMP0]], <4 x i1> poison, <16 x i32>
diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
index 935b08bf14951..5f2f83e19ab5b 100644
--- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -313,13 +313,13 @@ for.end:
; INTER: %[[I0:.+]] = shl i64 %index, 4
; INTER: %next.gep = getelementptr i8, ptr %a, i64 %[[I0]]
; INTER: %[[S1:.+]] = shl i64 %index, 4
-; INTER: %[[I1:.+]] = or i64 %[[S1]], 16
+; INTER: %[[I1:.+]] = or disjoint i64 %[[S1]], 16
; INTER: %next.gep2 = getelementptr i8, ptr %a, i64 %[[I1]]
; INTER: %[[S2:.+]] = shl i64 %index, 4
-; INTER: %[[I2:.+]] = or i64 %[[S2]], 32
+; INTER: %[[I2:.+]] = or disjoint i64 %[[S2]], 32
; INTER: %next.gep3 = getelementptr i8, ptr %a, i64 %[[I2]]
; INTER: %[[S3:.+]] = shl i64 %index, 4
-; INTER: %[[I3:.+]] = or i64 %[[S3]], 48
+; INTER: %[[I3:.+]] = or disjoint i64 %[[S3]], 48
; INTER: %next.gep4 = getelementptr i8, ptr %a, i64 %[[I3]]
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
;
@@ -363,13 +363,13 @@ for.end:
; CHECK: [[SHL1:%.+]] = shl i64 %index, 4
; CHECK: %next.gep = getelementptr i8, ptr %a, i64 [[SHL1]]
; CHECK: [[SHL2:%.+]] = shl i64 %index, 4
-; CHECK: %[[I1:.+]] = or i64 [[SHL2]], 16
+; CHECK: %[[I1:.+]] = or disjoint i64 [[SHL2]], 16
; CHECK: %next.gep2 = getelementptr i8, ptr %a, i64 %[[I1]]
; CHECK: [[SHL3:%.+]] = shl i64 %index, 4
-; CHECK: %[[I2:.+]] = or i64 [[SHL3]], 32
+; CHECK: %[[I2:.+]] = or disjoint i64 [[SHL3]], 32
; CHECK: %next.gep3 = getelementptr i8, ptr %a, i64 %[[I2]]
; CHECK: [[SHL4:%.+]] = shl i64 %index, 4
-; CHECK: %[[I3:.+]] = or i64 [[SHL4]], 48
+; CHECK: %[[I3:.+]] = or disjoint i64 [[SHL4]], 48
; CHECK: %next.gep4 = getelementptr i8, ptr %a, i64 %[[I3]]
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
;
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 7947c7f80c137..71a1af78f7c89 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -38,7 +38,7 @@ define void @test_array_load2_store2(i32 %C, i32 %D) {
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
-; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i64 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <4 x i32> [[STRIDED_VEC1]], [[BROADCAST_SPLAT3]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[TMP1]]
@@ -669,7 +669,7 @@ define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias n
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
-; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i64 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[STRIDED_VEC1]], [[STRIDED_VEC]]
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
index 5f133dc27b468..5178e9f1c8e42 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
@@ -31,7 +31,7 @@ define dso_local void @foo(i32 noundef %arg, ptr noundef nonnull align 4 derefer
; CHECK-NEXT: [[I3_SROA_8_0_INSERT_EXT:%.*]] = zext i32 [[I21_3:%.*]] to i64
; CHECK-NEXT: [[I3_SROA_8_0_INSERT_SHIFT:%.*]] = shl nuw i64 [[I3_SROA_8_0_INSERT_EXT]], 32
; CHECK-NEXT: [[I3_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[I21_2:%.*]] to i64
-; CHECK-NEXT: [[I3_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[I3_SROA_8_0_INSERT_SHIFT]], [[I3_SROA_0_0_INSERT_EXT]]
+; CHECK-NEXT: [[I3_SROA_0_0_INSERT_INSERT:%.*]] = or disjoint i64 [[I3_SROA_8_0_INSERT_SHIFT]], [[I3_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT: br label [[BB12]]
; CHECK: bb12:
; CHECK-NEXT: [[TMP1:%.*]] = phi i64 [ [[I3_SROA_0_0_INSERT_INSERT]], [[BB12_LOOPEXIT:%.*]] ], [ 180388626456, [[BB:%.*]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/loadcombine.ll b/llvm/test/Transforms/PhaseOrdering/X86/loadcombine.ll
index 524688343726b..fe49ba9d61d98 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/loadcombine.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/loadcombine.ll
@@ -41,8 +41,8 @@ define i32 @loadCombine_4consecutive_1243(ptr %p) {
; CHECK-NEXT: [[E4:%.*]] = zext i8 [[L4]] to i32
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[S3]], [[TMP1]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[S3]], [[TMP1]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -84,9 +84,9 @@ define i32 @loadCombine_4consecutive_1324(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -128,9 +128,9 @@ define i32 @loadCombine_4consecutive_1342(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -172,9 +172,9 @@ define i32 @loadCombine_4consecutive_1423(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -216,9 +216,9 @@ define i32 @loadCombine_4consecutive_1432(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -284,8 +284,8 @@ define i32 @loadCombine_4consecutive_2143(ptr %p) {
; CHECK-NEXT: [[E4:%.*]] = zext i8 [[L4]] to i32
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[S3]], [[TMP1]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[S3]], [[TMP1]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -383,9 +383,9 @@ define i32 @loadCombine_4consecutive_2413(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -427,9 +427,9 @@ define i32 @loadCombine_4consecutive_2431(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -471,9 +471,9 @@ define i32 @loadCombine_4consecutive_3124(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -515,9 +515,9 @@ define i32 @loadCombine_4consecutive_3142(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -611,8 +611,8 @@ define i32 @loadCombine_4consecutive_3412(ptr %p) {
; CHECK-NEXT: [[E1:%.*]] = zext i8 [[L1]] to i32
; CHECK-NEXT: [[E2:%.*]] = zext i8 [[L2]] to i32
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[TMP2]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[TMP2]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -682,9 +682,9 @@ define i32 @loadCombine_4consecutive_4123(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -726,9 +726,9 @@ define i32 @loadCombine_4consecutive_4132(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -770,9 +770,9 @@ define i32 @loadCombine_4consecutive_4213(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -814,9 +814,9 @@ define i32 @loadCombine_4consecutive_4231(ptr %p) {
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
; CHECK-NEXT: [[S3:%.*]] = shl nuw nsw i32 [[E3]], 16
; CHECK-NEXT: [[S4:%.*]] = shl nuw i32 [[E4]], 24
-; CHECK-NEXT: [[O1:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[O1]], [[S3]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[S4]]
+; CHECK-NEXT: [[O1:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[O1]], [[S3]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[S4]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
@@ -854,8 +854,8 @@ define i32 @loadCombine_4consecutive_4312(ptr %p) {
; CHECK-NEXT: [[E1:%.*]] = zext i8 [[L1]] to i32
; CHECK-NEXT: [[E2:%.*]] = zext i8 [[L2]] to i32
; CHECK-NEXT: [[S2:%.*]] = shl nuw nsw i32 [[E2]], 8
-; CHECK-NEXT: [[O2:%.*]] = or i32 [[S2]], [[E1]]
-; CHECK-NEXT: [[O3:%.*]] = or i32 [[O2]], [[TMP2]]
+; CHECK-NEXT: [[O2:%.*]] = or disjoint i32 [[S2]], [[E1]]
+; CHECK-NEXT: [[O3:%.*]] = or disjoint i32 [[O2]], [[TMP2]]
; CHECK-NEXT: ret i32 [[O3]]
;
%p1 = getelementptr i8, ptr %p, i32 1
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll b/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll
index 40ea616331d46..cee2666bc5060 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pixel-splat.ll
@@ -40,8 +40,8 @@ define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_LOAD4]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[TMP2]],
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <4 x i32> [[TMP3]],
-; CHECK-NEXT: [[TMP6:%.*]] = or <4 x i32> [[TMP4]],
-; CHECK-NEXT: [[TMP7:%.*]] = or <4 x i32> [[TMP5]],
+; CHECK-NEXT: [[TMP6:%.*]] = or disjoint <4 x i32> [[TMP4]],
+; CHECK-NEXT: [[TMP7:%.*]] = or disjoint <4 x i32> [[TMP5]],
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[POUT:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 4
@@ -61,7 +61,7 @@ define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT: [[OR2:%.*]] = mul nuw nsw i32 [[CONV]], 65793
-; CHECK-NEXT: [[OR3:%.*]] = or i32 [[OR2]], -16777216
+; CHECK-NEXT: [[OR3:%.*]] = or disjoint i32 [[OR2]], -16777216
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[POUT]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 [[OR3]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
index c4076385d8ed5..70eb21fcbf123 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
@@ -146,7 +146,7 @@ define i16 @reduce_blockstrided2(ptr nocapture noundef readonly %x, ptr nocaptur
; CHECK-NEXT: [[IDXPROM7:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM7]]
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
-; CHECK-NEXT: [[ADD10:%.*]] = or i32 [[MUL]], 1
+; CHECK-NEXT: [[ADD10:%.*]] = or disjoint i32 [[MUL]], 1
; CHECK-NEXT: [[IDXPROM11:%.*]] = sext i32 [[ADD10]] to i64
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[IDXPROM11]]
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX12]], align 2
@@ -688,7 +688,7 @@ define void @store_blockstrided3(ptr nocapture noundef readonly %x, ptr nocaptur
; CHECK-NEXT: [[IDXPROM11:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM11]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4
-; CHECK-NEXT: [[ADD14:%.*]] = or i32 [[MUL]], 1
+; CHECK-NEXT: [[ADD14:%.*]] = or disjoint i32 [[MUL]], 1
; CHECK-NEXT: [[IDXPROM15:%.*]] = sext i32 [[ADD14]] to i64
; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[X]], i64 [[IDXPROM15]]
; CHECK-NEXT: [[MUL21:%.*]] = mul nsw i32 [[STRIDE]], 3