diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 4bd190bf751ed..6fc392a0aa563 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1480,6 +1480,63 @@ define <2 x i64> @and64imm8h_lsl8(<2 x i64> %a) {
   ret <2 x i64> %tmp1
 }
 
+define <8 x i16> @bic_shifted_knownbits(<8 x i16> %v) {
+; CHECK-LABEL: bic_shifted_knownbits:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.8h, #1
+; CHECK-NEXT:    ushr v0.8h, v0.8h, #9
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+entry:
+  %vshr_n = lshr <8 x i16> %v, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+  %and.i = and <8 x i16> %vshr_n, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %and.i
+}
+
+define <8 x i32> @bic_shifted_knownbits2(<8 x i16> %v) {
+; CHECK-LABEL: bic_shifted_knownbits2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #-1048321 // =0xfff000ff
+; CHECK-NEXT:    ushll2 v1.4s, v0.8h, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vshr_n = zext <8 x i16> %v to <8 x i32>
+  %and.i = and <8 x i32> %vshr_n, <i32 -1048321, i32 -1048321, i32 -1048321, i32 -1048321, i32 -1048321, i32 -1048321, i32 -1048321, i32 -1048321>
+  ret <8 x i32> %and.i
+}
+
+define <8 x i32> @bic_shifted_knownbits3(<8 x i16> %v) {
+; CHECK-LABEL: bic_shifted_knownbits3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    ushll2 v1.4s, v0.8h, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ret
+  %a = and <8 x i16> %v, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+  %and.i = zext <8 x i16> %a to <8 x i32>
+  ret <8 x i32> %and.i
+}
+
+
+define <8 x i32> @bic_shifted_knownbits4(<8 x i32> %v) {
+; CHECK-LABEL: bic_shifted_knownbits4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v2.2d, #0xffff0000ffff0000
+; CHECK-NEXT:    shl v0.4s, v0.4s, #8
+; CHECK-NEXT:    shl v1.4s, v1.4s, #8
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    ret
+entry:
+  %vshr_n = shl <8 x i32> %v, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+  %and.i = and <8 x i32> %vshr_n, <i32 -65536, i32 -65536, i32 -65536, i32 -65536, i32 -65536, i32 -65536, i32 -65536, i32 -65536>
+  ret <8 x i32> %and.i
+}
+
 define <8 x i8> @orr8imm2s_lsl0(<8 x i8> %a) {
 ; CHECK-LABEL: orr8imm2s_lsl0:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll b/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll
index 91011ec66048f..ec4e3b3e42b7f 100644
--- a/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll
+++ b/llvm/test/CodeGen/AArch64/shiftregister-from-and.ll
@@ -21,7 +21,7 @@ define i64 @and_shiftedreg_from_and(i64 %a, i64 %b) {
 define i64 @bic_shiftedreg_from_and(i64 %a, i64 %b) {
 ; CHECK-LABEL: bic_shiftedreg_from_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16777215
+; CHECK-NEXT:    mov w8, #16777215 // =0xffffff
 ; CHECK-NEXT:    orn x8, x8, x0, asr #23
 ; CHECK-NEXT:    and x0, x1, x8
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define i64 @eor_shiftedreg_from_and(i64 %a, i64 %b) {
 define i64 @mvn_shiftedreg_from_and(i64 %a) {
 ; CHECK-LABEL: mvn_shiftedreg_from_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9007199254740991
+; CHECK-NEXT:    mov x8, #9007199254740991 // =0x1fffffffffffff
 ; CHECK-NEXT:    orn x0, x8, x0, lsl #36
 ; CHECK-NEXT:    ret
   %shl = shl i64 %a, 36
@@ -205,7 +205,7 @@ define i32 @shiftedreg_from_and_negative_oneuse2(i32 %a, i32 %b) {
 define i32 @shiftedreg_from_and_negative_andc1(i32 %a, i32 %b) {
 ; CHECK-LABEL: shiftedreg_from_and_negative_andc1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #26215
+; CHECK-NEXT:    mov w8, #26215 // =0x6667
 ; CHECK-NEXT:    movk w8, #65510, lsl #16
 ; CHECK-NEXT:    and w8, w8, w0, asr #23
 ; CHECK-NEXT:    add w0, w8, w1
@@ -221,7 +221,7 @@ define i32 @shiftedreg_from_and_negative_andc1(i32 %a, i32 %b) {
 define i32 @shiftedreg_from_and_negative_andc2(i32 %a, i32 %b) {
 ; CHECK-LABEL: shiftedreg_from_and_negative_andc2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-285212672
+; CHECK-NEXT:    mov w8, #-285212672 // =0xef000000
 ; CHECK-NEXT:    and w8, w8, w0, asr #23
 ; CHECK-NEXT:    add w0, w8, w1
 ; CHECK-NEXT:    ret