diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index f5d14905cac66..8367a12b38dcf 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -300,6 +300,16 @@ class AArch64TargetLowering : public TargetLowering { bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override; + /// Return true if it is profitable to fold a mask into a pair of variable shifts. + bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { + EVT VT = Y.getValueType(); + + if (VT.isVector()) + return false; + + return VT.getScalarSizeInBits() <= 64; + } + bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index ccf6d509313b9..d8b7d12654e5e 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -775,6 +775,16 @@ class VectorType; bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override; + /// Return true if it is profitable to fold a mask into a pair of variable shifts. + bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { + EVT VT = Y.getValueType(); + + if (VT.isVector()) + return false; + + return VT.getScalarSizeInBits() <= 32; + } + bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index cdc97faf394ca..3009de959299b 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -3659,11 +3659,8 @@ bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const { if (VT.isVector()) return false; - // 64-bit shifts on 32-bit targets produce really bad bloated code. - if (VT == MVT::i64 && !Subtarget.is64Bit()) - return false; - - return true; + unsigned MaxWidth = Subtarget.is64Bit() ? 
64 : 32; + return VT.getScalarSizeInBits() <= MaxWidth; } TargetLowering::ShiftLegalizationStrategy diff --git a/llvm/test/CodeGen/AArch64/and-mask-variable.ll b/llvm/test/CodeGen/AArch64/and-mask-variable.ll new file mode 100644 index 0000000000000..f41cdc6dd241b --- /dev/null +++ b/llvm/test/CodeGen/AArch64/and-mask-variable.ll @@ -0,0 +1,80 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +define i32 @mask_pair(i32 %x, i32 %y) { +; CHECK-SD-LABEL: mask_pair: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsr w8, w0, w1 +; CHECK-SD-NEXT: lsl w0, w8, w1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mask_pair: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-1 // =0xffffffff +; CHECK-GI-NEXT: lsl w8, w8, w1 +; CHECK-GI-NEXT: and w0, w8, w0 +; CHECK-GI-NEXT: ret + %shl = shl nsw i32 -1, %y + %and = and i32 %shl, %x + ret i32 %and +} + +define i64 @mask_pair_64(i64 %x, i64 %y) { +; CHECK-SD-LABEL: mask_pair_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsr x8, x0, x1 +; CHECK-SD-NEXT: lsl x0, x8, x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mask_pair_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-1 // =0xffffffffffffffff +; CHECK-GI-NEXT: lsl x8, x8, x1 +; CHECK-GI-NEXT: and x0, x8, x0 +; CHECK-GI-NEXT: ret + %shl = shl nsw i64 -1, %y + %and = and i64 %shl, %x + ret i64 %and +} + +define i128 @mask_pair_128(i128 %x, i128 %y) { +; CHECK-SD-LABEL: mask_pair_128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov x8, #-1 // =0xffffffffffffffff +; CHECK-SD-NEXT: mvn w9, w2 +; CHECK-SD-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: lsl x8, x8, x2 +; CHECK-SD-NEXT: lsr x9, x10, x9 +; CHECK-SD-NEXT: tst x2, #0x40 +; CHECK-SD-NEXT: orr x9, x8, x9 +; CHECK-SD-NEXT: csel x9, x8, x9, ne +; CHECK-SD-NEXT: csel x8, xzr, x8, ne +; CHECK-SD-NEXT: and x0, x8, x0 +; CHECK-SD-NEXT: and x1, x9, x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mask_pair_128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #64 // =0x40 +; CHECK-GI-NEXT: mov x9, #-1 // =0xffffffffffffffff +; CHECK-GI-NEXT: sub x10, x2, #64 +; CHECK-GI-NEXT: sub x8, x8, x2 +; CHECK-GI-NEXT: lsl x11, x9, x2 +; CHECK-GI-NEXT: cmp x2, #64 +; CHECK-GI-NEXT: lsr x8, x9, x8 +; CHECK-GI-NEXT: lsl x9, x9, x10 +; CHECK-GI-NEXT: csel x10, x11, xzr, lo +; CHECK-GI-NEXT: orr x8, x8, x11 +; CHECK-GI-NEXT: and x0, x10, x0 +; CHECK-GI-NEXT: csel x8, x8, x9, lo +; CHECK-GI-NEXT: cmp x2, #0 +; CHECK-GI-NEXT: csinv x8, x8, xzr, ne +; CHECK-GI-NEXT: and x1, x8, x1 +; CHECK-GI-NEXT: ret + %shl = shl nsw i128 -1, %y + %and = and i128 %shl, %x + ret i128 %and +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll index 8e822d19a19b9..5a96116142b51 100644 --- a/llvm/test/CodeGen/AArch64/extract-bits.ll +++ b/llvm/test/CodeGen/AArch64/extract-bits.ll @@ -532,11 +532,10 @@ define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr32_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr w10, w0, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr w8, w0, w1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i32 %val, %numskipbits %numhighbits = sub i32 32, %numlowbits @@ -548,12 +547,11 @@ define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { ; CHECK-LABEL: bextr32_c1_indexzext: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #32 // =0x20 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr w10, w0, w1 -; CHECK-NEXT: sub w8, w8, w2 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr w8, w0, w1 +; CHECK-NEXT: mov w9, #32 // =0x20 +; CHECK-NEXT: sub w9, w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %skip = zext i8 %numskipbits to i32 %shifted = lshr i32 %val, %skip @@ -569,10 +567,9 @@ define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind ; CHECK: // %bb.0: ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: neg w9, w2 -; CHECK-NEXT: mov w10, #-1 // =0xffffffff -; CHECK-NEXT: lsr w9, w10, w9 ; CHECK-NEXT: lsr w8, w8, w1 -; CHECK-NEXT: and w0, w9, w8 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %val = load i32, ptr %w %shifted = lshr i32 %val, %numskipbits @@ -587,11 +584,10 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n ; CHECK: // %bb.0: ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: mov w9, #32 // =0x20 -; CHECK-NEXT: mov w10, #-1 // =0xffffffff ; CHECK-NEXT: sub w9, w9, w2 ; CHECK-NEXT: lsr w8, w8, w1 -; CHECK-NEXT: lsr w9, w10, w9 -; CHECK-NEXT: and w0, w9, w8 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %val = load i32, ptr %w %skip = zext i8 %numskipbits to i32 @@ -606,11 +602,10 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr32_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr w10, w0, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w10, w8 +; CHECK-NEXT: lsr w8, w0, w1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i32 %val, %numskipbits %numhighbits = sub i32 32, %numlowbits @@ -624,11 +619,10 @@ define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: neg x8, x2 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsr x8, x0, x1 
+; CHECK-NEXT: neg x9, x2 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %numhighbits = sub i64 64, %numlowbits @@ -640,13 +634,12 @@ define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_c1_indexzext: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #64 // =0x40 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: sub w8, w8, w2 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: mov w9, #64 // =0x40 +; CHECK-NEXT: sub w9, w9, w2 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %skip = zext i8 %numskipbits to i64 %shifted = lshr i64 %val, %skip @@ -662,10 +655,9 @@ define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind ; CHECK: // %bb.0: ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: neg x9, x2 -; CHECK-NEXT: mov x10, #-1 // =0xffffffffffffffff -; CHECK-NEXT: lsr x9, x10, x9 ; CHECK-NEXT: lsr x8, x8, x1 -; CHECK-NEXT: and x0, x9, x8 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %val = load i64, ptr %w %shifted = lshr i64 %val, %numskipbits @@ -679,13 +671,12 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n ; CHECK-LABEL: bextr64_c3_load_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr x8, [x0] +; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 ; CHECK-NEXT: mov w9, #64 // =0x40 -; CHECK-NEXT: mov x10, #-1 // =0xffffffffffffffff ; CHECK-NEXT: sub w9, w9, w2 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 ; CHECK-NEXT: lsr x8, x8, x1 -; CHECK-NEXT: lsr x9, x10, x9 -; CHECK-NEXT: and x0, x9, x8 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %val = load i64, ptr %w %skip = zext i8 %numskipbits to i64 @@ -700,11 +691,10 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: neg x8, x2 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x10, x8 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: neg x9, x2 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %numhighbits = sub i64 64, %numlowbits @@ -737,11 +727,10 @@ define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_32_c1: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %truncshifted = trunc i64 %shifted to i32 @@ -756,11 +745,10 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_32_c2: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // 
=0xffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %numhighbits = sub i32 32, %numlowbits diff --git a/llvm/test/CodeGen/AArch64/extract-lowbits.ll b/llvm/test/CodeGen/AArch64/extract-lowbits.ll index 4b8f3e86b5fef..368440c65df84 100644 --- a/llvm/test/CodeGen/AArch64/extract-lowbits.ll +++ b/llvm/test/CodeGen/AArch64/extract-lowbits.ll @@ -347,10 +347,9 @@ define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind { define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: lsr w8, w8, w9 -; CHECK-NEXT: and w0, w8, w0 +; CHECK-NEXT: neg w8, w1 +; CHECK-NEXT: lsl w9, w0, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %numhighbits = sub i32 32, %numlowbits %mask = lshr i32 -1, %numhighbits @@ -362,10 +361,9 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c1_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #32 // =0x20 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w0 +; CHECK-NEXT: lsl w9, w0, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %numhighbits = sub i8 32, %numlowbits %sh_prom = zext i8 %numhighbits to i32 @@ -377,11 +375,10 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c2_load: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #-1 // =0xffffffff +; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: ldr w10, [x0] -; CHECK-NEXT: lsr w8, w8, w9 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %val = load i32, ptr %w %numhighbits = sub i32 32, %numlowbits @@ -394,11 +391,10 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c3_load_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #32 // =0x20 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: ldr w10, [x0] +; CHECK-NEXT: ldr w9, [x0] ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsl w9, w9, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %val = load i32, ptr %w %numhighbits = sub i8 32, %numlowbits @@ -411,10 +407,9 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: lsr w8, w8, w9 -; CHECK-NEXT: and w0, w0, w8 +; CHECK-NEXT: neg w8, w1 +; CHECK-NEXT: lsl w9, w0, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %numhighbits = sub i32 32, %numlowbits %mask = lshr i32 -1, %numhighbits @@ -427,10 +422,9 @@ define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: neg x9, x1 -; CHECK-NEXT: lsr x8, x8, x9 -; CHECK-NEXT: and x0, x8, x0 +; CHECK-NEXT: neg x8, x1 +; CHECK-NEXT: lsl x9, x0, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret 
%numhighbits = sub i64 64, %numlowbits %mask = lshr i64 -1, %numhighbits @@ -442,10 +436,9 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c1_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #64 // =0x40 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x0 +; CHECK-NEXT: lsl x9, x0, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %numhighbits = sub i8 64, %numlowbits %sh_prom = zext i8 %numhighbits to i64 @@ -457,11 +450,10 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c2_load: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff +; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: neg x9, x1 -; CHECK-NEXT: ldr x10, [x0] -; CHECK-NEXT: lsr x8, x8, x9 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %val = load i64, ptr %w %numhighbits = sub i64 64, %numlowbits @@ -474,11 +466,10 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c3_load_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #64 // =0x40 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff -; CHECK-NEXT: ldr x10, [x0] +; CHECK-NEXT: ldr x9, [x0] ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsl x9, x9, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %val = load i64, ptr %w %numhighbits = sub i8 64, %numlowbits @@ -491,10 +482,9 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: neg x9, x1 -; CHECK-NEXT: lsr x8, x8, x9 -; CHECK-NEXT: and x0, x0, x8 +; CHECK-NEXT: neg x8, x1 +; CHECK-NEXT: lsl x9, x0, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %numhighbits = sub i64 64, %numlowbits %mask = lshr i64 -1, %numhighbits diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll new file mode 100644 index 0000000000000..0f84b76f97a6b --- /dev/null +++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll @@ -0,0 +1,90 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +define i32 @mask_pair(i32 %x, i32 %y) { +; V7M-LABEL: mask_pair: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: mask_pair: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: mask_pair: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: mask_pair: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: bx lr + %shl = shl nsw i32 -1, %y + %and = and i32 %shl, %x + ret i32 %and +} + +define i64 @mask_pair_64(i64 %x, i64 %y) { +; V7M-LABEL: mask_pair_64: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; 
V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: and.w r0, r0, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: mask_pair_64: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: mask_pair_64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: mask_pair_64: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shl = shl nsw i64 -1, %y + %and = and i64 %shl, %x + ret i64 %and +} diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll new file mode 100644 index 0000000000000..237c8bc7d2906 --- /dev/null +++ b/llvm/test/CodeGen/ARM/extract-bits.ll @@ -0,0 +1,5674 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +; *Please* keep in sync with test/CodeGen/X86/extract-bits.ll + +; https://bugs.llvm.org/show_bug.cgi?id=36419 +; https://bugs.llvm.org/show_bug.cgi?id=37603 +; https://bugs.llvm.org/show_bug.cgi?id=37610 + +; Patterns: +; a) (x >> start) & (1 << nbits) - 1 +; b) (x >> start) & ~(-1 << nbits) +; c) (x >> start) & (-1 >> (32 - y)) +; d) (x >> start) << (32 - y) >> (32 - y) +; are equivalent. + +; ---------------------------------------------------------------------------- ; +; Pattern a. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_a0: +; CHECK: @ %bb.0: +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: movs r1, #1 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_a0_arithmetic: +; CHECK: @ %bb.0: +; CHECK-NEXT: asrs r0, r1 +; CHECK-NEXT: movs r1, #1 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_a0_arithmetic: +; V7M: @ %bb.0: +; V7M-NEXT: asrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a0_arithmetic: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, asr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a0_arithmetic: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: asrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a0_arithmetic: +; V6M: @ %bb.0: +; V6M-NEXT: asrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = ashr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr32_a1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: movs r1, #1 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; 
V6M-LABEL: bextr32_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_a2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: movs r1, #1 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: lsrs r3, r1 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr32_a3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: movs r1, #1 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: lsrs r3, r1 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_a4_commutative: +; CHECK: @ %bb.0: +; 
CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: movs r1, #1 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_a0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: rsb.w r4, r12, #32 +; CHECK-NEXT: subs.w r3, r12, #32 +; CHECK-NEXT: lsr.w r4, lr, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r4, lr, r3 +; CHECK-NEXT: lsl.w r3, lr, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: subs r3, #1 +; CHECK-NEXT: sbc r12, r4, #0 +; CHECK-NEXT: rsb.w r4, r2, #32 +; CHECK-NEXT: lsl.w r4, r1, r4 +; CHECK-NEXT: orrs r0, r4 +; CHECK-NEXT: subs.w r4, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r4 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: and.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: and.w r1, r1, r12 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_a0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r4, r0 +; V7A-NEXT: and r1, r12, r1 +; 
V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_a0_arithmetic: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: rsb.w r4, r12, #32 +; CHECK-NEXT: subs.w r3, r12, #32 +; CHECK-NEXT: lsr.w r4, lr, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r4, lr, r3 +; CHECK-NEXT: lsl.w r3, lr, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: subs r3, #1 +; CHECK-NEXT: sbc r12, r4, #0 +; CHECK-NEXT: rsb.w r4, r2, #32 +; CHECK-NEXT: lsl.w r4, r1, r4 +; CHECK-NEXT: orrs r0, r4 +; CHECK-NEXT: subs.w r4, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: asrpl.w r0, r1, r4 +; CHECK-NEXT: asr.w r2, r1, r2 +; CHECK-NEXT: and.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: asrpl r2, r1, #31 +; CHECK-NEXT: and.w r1, r12, r2 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_a0_arithmetic: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: asrpl.w r0, r1, r4 +; V7M-NEXT: asr.w r2, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: asrpl r2, r1, #31 +; V7M-NEXT: and.w r1, r12, r2 +; V7M-NEXT: pop 
{r4, pc} +; +; V7A-LABEL: bextr64_a0_arithmetic: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: asr r2, r1, r2 +; V7A-NEXT: asrpl r0, r1, r3 +; V7A-NEXT: asrpl r2, r1, #31 +; V7A-NEXT: and r0, r4, r0 +; V7A-NEXT: and r1, r12, r2 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a0_arithmetic: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: asrpl.w r0, r1, r4 +; V7A-T-NEXT: asr.w r2, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: asrpl r2, r1, #31 +; V7A-T-NEXT: and.w r1, r12, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a0_arithmetic: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_lasr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = ashr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr64_a1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: rsb.w r4, r3, #32 +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: subs.w r12, r3, #32 +; CHECK-NEXT: lsl.w r3, lr, r3 +; CHECK-NEXT: lsr.w r4, lr, r4 +; CHECK-NEXT: lsr.w r0, r0, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r4, lr, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: subs r3, #1 +; CHECK-NEXT: sbc r12, r4, #0 +; CHECK-NEXT: rsb.w r4, r2, #32 +; CHECK-NEXT: lsl.w r4, r1, r4 +; CHECK-NEXT: orrs r0, r4 +; CHECK-NEXT: subs.w r4, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r4 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: and.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: and.w r1, r1, r12 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; 
V7M-NEXT: rsb.w r4, r3, #32 +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: subs.w r12, r3, #32 +; V7M-NEXT: lsl.w r3, lr, r3 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: rsb r12, r3, #32 +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: subs r4, r3, #32 +; V7A-NEXT: lsl r3, lr, r3 +; V7A-NEXT: lsr r12, lr, r12 +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: lslpl r12, lr, r4 +; V7A-NEXT: rsb r4, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: subs r3, r3, #1 +; V7A-NEXT: sbc r12, r12, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r4 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r4 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r3, #32 +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: subs.w r12, r3, #32 +; V7A-T-NEXT: lsl.w r3, lr, r3 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: lsr.w r0, r0, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: mov r2, r3 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_a2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: rsb.w r1, r12, #32 +; CHECK-NEXT: subs.w r3, r12, #32 +; CHECK-NEXT: lsr.w r1, lr, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, lr, r3 +; 
CHECK-NEXT: lsl.w r3, lr, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: subs.w lr, r3, #1 +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: sbc r12, r1, #0 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: lsl.w r1, r3, r1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: subs.w r1, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r3, r1 +; CHECK-NEXT: lsr.w r1, r3, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: and.w r0, r0, lr +; CHECK-NEXT: and.w r1, r1, r12 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: rsb.w r1, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r1, lr, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs.w lr, r3, #1 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: sbc r12, r1, #0 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r0, r0, lr +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r6, lr} +; V7A-NEXT: push {r4, r5, r6, lr} +; V7A-NEXT: ldr r1, [sp, #16] +; V7A-NEXT: mov r3, #1 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r5, [r0, #4] +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: lsl r1, r3, r1 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lslpl r0, r3, r4 +; V7A-NEXT: subs r1, r1, #1 +; V7A-NEXT: sbc r3, r0, #0 +; V7A-NEXT: lsr r0, r6, r2 +; V7A-NEXT: rsb r6, r2, #32 +; V7A-NEXT: orr r0, r0, r5, lsl r6 +; V7A-NEXT: subs r6, r2, #32 +; V7A-NEXT: lsrpl r0, r5, r6 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: lsr r1, r5, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r4, r5, r6, pc} +; +; V7A-T-LABEL: bextr64_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: movs r3, #1 +; V7A-T-NEXT: ldrd lr, r1, [r0] +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r0, r12, #32 +; V7A-T-NEXT: lsr.w r4, r3, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, r3, r0 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsr.w r3, lr, r2 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r3, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #4 +; V6M-NEXT: sub sp, #4 +; V6M-NEXT: str r2, [sp] @ 4-byte Spill +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #24] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: subs r4, r0, #1 +; V6M-NEXT: sbcs r6, r7 +; V6M-NEXT: ldm r5!, {r0, r1} +; V6M-NEXT: ldr r2, [sp] @ 
4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: ands r1, r6 +; V6M-NEXT: add sp, #4 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr64_a3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: mov.w r12, #1 +; CHECK-NEXT: subs.w lr, r2, #32 +; CHECK-NEXT: lsl.w r2, r12, r2 +; CHECK-NEXT: lsr.w r3, r12, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r3, r12, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs.w lr, r2, #1 +; CHECK-NEXT: ldrd r0, r2, [r0] +; CHECK-NEXT: sbc r12, r3, #0 +; CHECK-NEXT: rsb.w r3, r1, #32 +; CHECK-NEXT: lsl.w r3, r2, r3 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r1, #32 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r2, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: and.w r0, r0, lr +; CHECK-NEXT: and.w r1, r1, r12 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs.w lr, r2, #1 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: sbc r12, r3, #0 +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r0, r0, lr +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r6, lr} +; V7A-NEXT: push {r4, r5, r6, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r3, #1 +; V7A-NEXT: ldr r5, [r0, #4] +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl r0, r3, r4 +; V7A-NEXT: subs r3, r2, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: lsr r2, r5, r1 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r2, r0, r2 +; V7A-NEXT: lsr r0, r6, r1 +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: orr r0, r0, r5, lsl r1 +; V7A-NEXT: mov r1, r2 +; V7A-NEXT: lsrpl r0, r5, r4 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r4, r5, r6, pc} +; +; V7A-T-LABEL: bextr64_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: lsl.w r2, lr, r2 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: ldrd r12, r0, [r0] +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs.w lr, r2, #1 +; V7A-T-NEXT: sbc r2, r4, #0 +; V7A-T-NEXT: lsr.w r4, r0, r1 +; V7A-T-NEXT: subs.w r3, r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r4, #0 +; V7A-T-NEXT: and.w r2, r2, r4 +; V7A-T-NEXT: rsb.w r4, 
r1, #32 +; V7A-T-NEXT: lsr.w r1, r12, r1 +; V7A-T-NEXT: lsl.w r4, r0, r4 +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r1, r0, r3 +; V7A-T-NEXT: and.w r0, lr, r1 +; V7A-T-NEXT: mov r1, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #4 +; V6M-NEXT: sub sp, #4 +; V6M-NEXT: str r1, [sp] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: subs r4, r0, #1 +; V6M-NEXT: sbcs r5, r7 +; V6M-NEXT: ldm r6!, {r0, r1} +; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: add sp, #4 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_a4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: mov.w lr, #1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: rsb.w r4, r12, #32 +; CHECK-NEXT: subs.w r3, r12, #32 +; CHECK-NEXT: lsr.w r4, lr, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r4, lr, r3 +; CHECK-NEXT: lsl.w r3, lr, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: subs r3, #1 +; CHECK-NEXT: sbc r12, r4, #0 +; CHECK-NEXT: rsb.w r4, r2, #32 +; CHECK-NEXT: lsl.w r4, r1, r4 +; CHECK-NEXT: orrs r0, r4 +; CHECK-NEXT: subs.w r4, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r4 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: and.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: and.w r1, r1, r12 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r0, r4 +; V7A-NEXT: 
and r1, r1, r12 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
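+; As a rough C-style sketch (illustrative only), this variant computes
+;   (i32)(((u64)val >> numskipbits) & (((u64)1 << numlowbits) - 1))
+; i.e. the shift, the mask construction and the AND are all i64 operations,
+; and only the final result is truncated to i32.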
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_a0: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: lsls r2, r1 +; CHECK-NEXT: subs r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs r1, r2, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsls r2, r1 +; V7M-NEXT: subs r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r1, r2, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mov r1, #1 +; V7A-NEXT: lsl r1, r1, r12 +; V7A-NEXT: subs r2, r12, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: sub r1, r1, #1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
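+; Roughly (illustrative only):
+;   ((i32)((u64)val >> numskipbits)) & (((u32)1 << numlowbits) - 1)
+; i.e. the value is truncated right after the 64-bit shift and the mask is
+; built and applied as i32.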
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_a1: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_a1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a1: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: add r12, r3, lr, lsl r12 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r12, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_a1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %truncshifted + ret i32 %masked +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
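+; Here the (1 << %numlowbits) - 1 mask is built in i32, zero-extended to i64,
+; the 'and' is done in i64, and the result is then truncated to i32.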
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_a2: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_a2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a2: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: add r12, r3, lr, lsl r12 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r12, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_a2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %zextmask = zext i32 %mask to i64 + %masked = and i64 %zextmask, %shifted + %truncmasked = trunc i64 %masked to i32 + ret i32 %truncmasked +} + +; ---------------------------------------------------------------------------- ; +; Pattern b. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_b0: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: bics r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr32_b1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: bics r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_b2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bics r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #0 +; V6M-NEXT: mvns r3, r3 +; V6M-NEXT: lsls r3, r2 +; V6M-NEXT: ldr r0, [r0] +; 
V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bics r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr32_b3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bics r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #0 +; V6M-NEXT: mvns r3, r3 +; V6M-NEXT: lsls r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bics r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_b4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: bics r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_b0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: lsl.w r3, r2, r12 +; CHECK-NEXT: subs.w lr, r12, #32 
+; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r2, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: bics r1, r2 +; CHECK-NEXT: bics r0, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_b0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #16] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r5, r0, r3 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: itt pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bic.w r0, r5, r0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr64_b1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: lsr.w r12, r0, r2 +; CHECK-NEXT: rsb.w r0, r2, #32 +; CHECK-NEXT: lsl.w r0, r1, r0 +; CHECK-NEXT: orr.w r12, r12, r0 +; CHECK-NEXT: subs.w r0, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r12, r1, r0 +; CHECK-NEXT: lsr.w r0, r1, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: subs.w r1, r3, #32 +; CHECK-NEXT: lsl.w r3, r2, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl r2, r1 +; CHECK-NEXT: bic.w r1, r0, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: bic.w r0, r12, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: 
bextr64_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: lsr.w r12, r0, r2 +; V7M-NEXT: rsb.w r0, r2, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r1, r0 +; V7M-NEXT: lsr.w r0, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: subs.w r1, r3, #32 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r2, r1 +; V7M-NEXT: bic.w r1, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bic.w r0, r12, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r12, r0, r2 +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: orr r12, r12, r1, lsl r0 +; V7A-NEXT: subs r0, r2, #32 +; V7A-NEXT: lsrpl r12, r1, r0 +; V7A-NEXT: lsr r0, r1, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: subs r1, r3, #32 +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: lsl r3, r2, r3 +; V7A-NEXT: lslpl r2, r2, r1 +; V7A-NEXT: bic r1, r0, r2 +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: bic r0, r12, r3 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsr.w r12, r0, r2 +; V7A-T-NEXT: rsb.w r0, r2, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r12, r12, r0 +; V7A-T-NEXT: subs.w r0, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r12, r1, r0 +; V7A-T-NEXT: lsr.w r0, r1, r2 +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r1, r3, #32 +; V7A-T-NEXT: lsl.w r3, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r2, r1 +; V7A-T-NEXT: bic.w r1, r0, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: bic.w r0, r12, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r4, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r6, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: pop {r4, r5, r6, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_b2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: lsl.w r1, r3, r1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: subs.w r1, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r3, r1 +; CHECK-NEXT: lsr.w r1, r3, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: lsl.w r3, r2, r12 +; CHECK-NEXT: subs.w lr, r12, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r2, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: bics r1, r2 +; CHECK-NEXT: bics r0, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: 
subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r1, r3, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: lsl.w r2, r3, r12 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bextr64_b3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: ldrd r12, r0, [r0] +; CHECK-NEXT: rsb.w r3, r1, #32 +; CHECK-NEXT: lsl.w lr, r0, r3 +; CHECK-NEXT: lsr.w r3, r12, r1 +; CHECK-NEXT: orr.w r12, r3, lr +; CHECK-NEXT: subs.w r3, r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r12, r0, r3 +; CHECK-NEXT: lsr.w r0, r0, r1 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: subs.w r1, r2, #32 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl r3, r1 +; CHECK-NEXT: bic.w r1, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: bic.w r0, r12, r2 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r12, r0, [r0] +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w lr, 
r0, r3 +; V7M-NEXT: lsr.w r3, r12, r1 +; V7M-NEXT: orr.w r12, r3, lr +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r0, r3 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r1 +; V7M-NEXT: bic.w r1, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: bic.w r0, r12, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldm r0, {r0, r3} +; V7A-NEXT: lsr r12, r0, r1 +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: orr r12, r12, r3, lsl r0 +; V7A-NEXT: subs r0, r1, #32 +; V7A-NEXT: lsrpl r12, r3, r0 +; V7A-NEXT: lsr r0, r3, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: subs r1, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r1 +; V7A-NEXT: bic r1, r0, r3 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r12, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: ldrd r12, r3, [r0] +; V7A-T-NEXT: rsb.w r0, r1, #32 +; V7A-T-NEXT: lsl.w lr, r3, r0 +; V7A-T-NEXT: lsr.w r0, r12, r1 +; V7A-T-NEXT: orr.w r12, r0, lr +; V7A-T-NEXT: subs.w r0, r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r12, r3, r0 +; V7A-T-NEXT: lsr.w r0, r3, r1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r1 +; V7A-T-NEXT: bic.w r1, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: bic.w r0, r12, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r4, r2 +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r6, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: pop {r4, r5, r6, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_b4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: lsl.w r3, r2, r12 +; CHECK-NEXT: subs.w lr, r12, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r2, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: bics r1, r2 +; CHECK-NEXT: bics r0, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: 
rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #16] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r5, r0, r3 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: itt pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bic.w r0, r5, r0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
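+; Here %numlowbits is zero-extended from i8, the ~(-1 << %numlowbits) mask and
+; the 'and' are computed in i64, and the result is truncated to i32 last.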
+define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_b0: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsls r2, r1 +; CHECK-NEXT: subs r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: bics r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsls r2, r1 +; V7M-NEXT: subs r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: lsl r1, r1, r12 +; V7A-NEXT: subs r2, r12, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: bic r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsr.w r12, r0, r2 +; V7A-T-NEXT: rsb.w r0, r2, #32 +; V7A-T-NEXT: ldrb.w r3, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: subs.w r2, r3, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r2, [r1] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: pop {r4, pc} + %shiftedval = lshr i64 %val, %numskipbits + %widenumlowbits = zext i8 %numlowbits to i64 + %notmask = shl nsw i64 -1, %widenumlowbits + %mask = xor i64 %notmask, -1 + %wideres = and i64 %shiftedval, %mask + %res = trunc i64 %wideres to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
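+; Here the shifted value is truncated to i32 first, so the ~(-1 << %numlowbits)
+; mask and the 'and' are computed in i32.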
+define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_b1: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_b1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r12 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldrb.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r1, [r1] +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: pop {r7, pc} + %shiftedval = lshr i64 %val, %numskipbits + %truncshiftedval = trunc i64 %shiftedval to i32 + %widenumlowbits = zext i8 %numlowbits to i32 + %notmask = shl nsw i32 -1, %widenumlowbits + %mask = xor i32 %notmask, -1 + %res = and i32 %truncshiftedval, %mask + ret i32 %res +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
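+; Here the ~(-1 << %numlowbits) mask is built in i32, zero-extended to i64,
+; the 'and' is done in i64, and the result is then truncated to i32.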
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_b2: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldrb.w r1, [sp] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_b2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b2: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r12 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldrb.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r1, [r1] +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: pop {r7, pc} + %shiftedval = lshr i64 %val, %numskipbits + %widenumlowbits = zext i8 %numlowbits to i32 + %notmask = shl nsw i32 -1, %widenumlowbits + %mask = xor i32 %notmask, -1 + %zextmask = zext i32 %mask to i64 + %wideres = and i64 %shiftedval, %zextmask + %res = trunc i64 %wideres to i32 + ret i32 %res +} + +; ---------------------------------------------------------------------------- ; +; Pattern c. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_c0: +; CHECK: @ %bb.0: +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_c1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_c2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; 
V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_c3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_c4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; 
V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_c0: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: ldr.w r12, [sp] +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: rsb.w r3, r12, #64 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: lsr.w r3, r2, r3 +; CHECK-NEXT: rsbs.w r12, r12, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r2, r2, r12 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r5, r1, r2 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r4, r12, #64 +; V7A-NEXT: rsbs lr, r12, #32 +; V7A-NEXT: lsr r4, r3, r4 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: movwpl r5, #0 +; V7A-NEXT: and r12, r4, r5 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: mov r1, r12 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #-1 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r2, lr, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, 
%numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_c1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: uxtb r2, r2 +; CHECK-NEXT: lsr.w r12, r0, r2 +; CHECK-NEXT: rsb.w r0, r2, #32 +; CHECK-NEXT: lsl.w r0, r1, r0 +; CHECK-NEXT: orr.w r12, r12, r0 +; CHECK-NEXT: subs.w r0, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r12, r1, r0 +; CHECK-NEXT: rsb.w r0, r3, #64 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: uxtb r0, r0 +; CHECK-NEXT: subs.w lr, r0, #32 +; CHECK-NEXT: lsr.w r2, r3, r0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r3, r3, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: and.w r0, r3, r12 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: lsr.w r12, r0, r2 +; V7M-NEXT: rsb.w r0, r2, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r1, r0 +; V7M-NEXT: rsb.w r0, r3, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: uxtb r0, r0 +; V7M-NEXT: subs.w lr, r0, #32 +; V7M-NEXT: lsr.w r2, r3, r0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: and.w r0, r3, r12 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: lsr lr, r0, r12 +; V7A-NEXT: rsb r0, r12, #32 +; V7A-NEXT: orr r4, lr, r1, lsl r0 +; V7A-NEXT: mvn lr, #31 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lsrpl r4, r1, r2 +; V7A-NEXT: rsb r2, r3, #64 +; V7A-NEXT: lsr r1, r1, r12 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsr r0, r3, r12 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: and r1, r0, r1 +; V7A-NEXT: lsrpl r3, r3, r2 +; V7A-NEXT: and r0, r3, r4 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: lsr.w lr, r0, r12 +; V7A-T-NEXT: rsb.w r0, r12, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r4, lr, r0 +; V7A-T-NEXT: mvn lr, #31 +; V7A-T-NEXT: uxtab r2, lr, r2 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r4, r1, r2 +; V7A-T-NEXT: rsb.w r2, r3, #64 +; V7A-T-NEXT: lsr.w r1, r1, r12 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: uxtab r2, lr, r2 +; V7A-T-NEXT: lsr.w r0, r3, r12 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: and.w r1, r1, r0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: and.w r0, r3, r4 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r3 +; V6M-NEXT: uxtb r2, r2 +; V6M-NEXT: bl __aeabi_llsr +; 
V6M-NEXT: mov r6, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r5 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r6 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_c2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: ldr.w r12, [sp] +; CHECK-NEXT: lsl.w r1, r3, r1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: subs.w r1, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r3, r1 +; CHECK-NEXT: lsr.w r1, r3, r2 +; CHECK-NEXT: rsb.w r3, r12, #64 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: rsbs.w r12, r12, #32 +; CHECK-NEXT: lsr.w r3, r2, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r2, r2, r12 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r8, lr} +; V7A-NEXT: push {r4, r6, r8, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r6, r12, #64 +; V7A-NEXT: ldr r8, [r0] +; V7A-NEXT: mvn r0, #0 +; V7A-NEXT: rsbs r1, r12, #32 +; V7A-NEXT: lsr r6, r0, r6 +; V7A-NEXT: lsr r4, r3, r2 +; V7A-NEXT: lsrpl r0, r0, r1 +; V7A-NEXT: movwpl r6, #0 +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: and r1, r6, r4 +; V7A-NEXT: lsr r6, r8, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: orr r2, r6, r3, lsl r2 +; V7A-NEXT: lsrpl r2, r3, r12 +; V7A-NEXT: and r0, r0, r2 +; V7A-NEXT: pop {r4, r6, r8, pc} +; +; V7A-T-LABEL: bextr64_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r1, r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r12, r12, #32 +; V7A-T-NEXT: lsr.w r2, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, r12 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; 
V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_c3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsr.w r12, r0, r1 +; CHECK-NEXT: rsb.w r0, r1, #32 +; CHECK-NEXT: lsl.w r0, r3, r0 +; CHECK-NEXT: orr.w r12, r12, r0 +; CHECK-NEXT: subs.w r0, r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r12, r3, r0 +; CHECK-NEXT: rsb.w r0, r2, #64 +; CHECK-NEXT: lsr.w r1, r3, r1 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: uxtb r0, r0 +; CHECK-NEXT: subs.w lr, r0, #32 +; CHECK-NEXT: lsr.w r2, r3, r0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r3, r3, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: and.w r0, r3, r12 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bextr64_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsr.w r12, r0, r1 +; V7M-NEXT: rsb.w r0, r1, #32 +; V7M-NEXT: lsl.w r0, r3, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r3, r0 +; V7M-NEXT: rsb.w r0, r2, #64 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: uxtb r0, r0 +; V7M-NEXT: subs.w lr, r0, #32 +; V7M-NEXT: lsr.w r2, r3, r0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: and.w r0, r3, r12 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: lsr r12, r4, r0 +; V7A-NEXT: rsb r4, r0, #32 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: orr lr, r12, r3, lsl r4 +; V7A-NEXT: mvn r12, #31 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsrpl lr, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: uxtab r4, r12, r1 +; V7A-NEXT: lsr r2, r3, r2 +; V7A-NEXT: cmp r4, #0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r1, r2, r0 +; V7A-NEXT: lsrpl r3, r3, r4 +; V7A-NEXT: and r0, r3, lr +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: rsb.w r3, r0, #32 +; V7A-T-NEXT: lsl.w r4, lr, r3 +; V7A-T-NEXT: lsr.w r3, r12, r0 +; V7A-T-NEXT: orr.w r5, r3, r4 +; V7A-T-NEXT: mvn r12, #31 +; V7A-T-NEXT: uxtab r1, r12, r1 +; V7A-T-NEXT: lsr.w r0, lr, r0 +; V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: 
it pl +; V7A-T-NEXT: lsrpl.w r5, lr, r1 +; V7A-T-NEXT: rsb.w r1, r2, #64 +; V7A-T-NEXT: mov.w r4, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: uxtb r2, r1 +; V7A-T-NEXT: uxtab r3, r12, r1 +; V7A-T-NEXT: lsr.w r2, r4, r2 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: and.w r1, r2, r0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r4, r3 +; V7A-T-NEXT: and.w r0, r4, r5 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r2 +; V6M-NEXT: ldr r4, [r0] +; V6M-NEXT: ldr r3, [r0, #4] +; V6M-NEXT: uxtb r2, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r5 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r6 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_c4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: ldr.w r12, [sp] +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: rsb.w r3, r12, #64 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: lsr.w r3, r2, r3 +; CHECK-NEXT: rsbs.w r12, r12, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r2, r2, r12 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r5, r1, r2 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r4, r12, #64 +; V7A-NEXT: rsbs lr, r12, #32 +; V7A-NEXT: lsr r4, r3, r4 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: movwpl r5, #0 +; V7A-NEXT: and r12, r5, r4 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: mov r1, r12 +; V7A-NEXT: and r0, r0, r3 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; 
V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #-1 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r2, lr, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. +define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_c0: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: rsbs.w r1, r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl r2, r1 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: rsbs.w r1, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r2, r1 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r3, [sp] +; V7A-NEXT: rsbs r12, r3, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsrpl r3, r3, r12 +; V7A-NEXT: lsr r12, r0, r2 +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r12, r1, lsl r0 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: rsbs.w r1, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r2, r1 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: ldr r0, [sp, #8] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl 
__aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. +define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_c1: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_c1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %truncshifted + ret i32 %masked +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
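(Editorial sketch, not part of the test: the comment above describes the bextr64_32_c2 variant checked next. A minimal C equivalent is given below, assuming numlowbits is in 1..32 and numskipbits is below 64 so every shift stays defined; the function name is illustrative only.)

    #include <stdint.h>

    /* Shift in 64 bits, build the mask in 32 bits, zero-extend it,
       apply it with a 64-bit AND, and truncate the result last. */
    static uint32_t bextr64_32_c2_ref(uint64_t val, uint64_t numskipbits,
                                      uint32_t numlowbits) {
        uint64_t shifted = val >> numskipbits;               /* 64-bit shift        */
        uint32_t mask    = UINT32_MAX >> (32 - numlowbits);  /* 32-bit mask         */
        uint64_t masked  = (uint64_t)mask & shifted;         /* zero-extend, 64-bit AND */
        return (uint32_t)masked;                             /* truncate last       */
    }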
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_c2: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_c2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c2: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %zextmask = zext i32 %mask to i64 + %masked = and i64 %zextmask, %shifted + %truncmasked = trunc i64 %masked to i32 + ret i32 %truncmasked +} + +; ---------------------------------------------------------------------------- ; +; Pattern d. 32-bit. 
+; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_d0: +; CHECK: @ %bb.0: +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %shifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_d1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %shifted, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_d2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 
+; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %shifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr32_d3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr32_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %shifted, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +; 64-bit. 
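(Editorial sketch before the 64-bit pattern-d tests: an illustrative C form of pattern d at 64-bit width, assuming 1 <= numlowbits <= 64 and numskipbits < 64; the function name is made up for this note. Pattern d never materializes a mask constant, which is why the 32-bit cases above lower to a plain shift-left/shift-right pair.)

    #include <stdint.h>

    /* Pattern d, 64-bit: shift the unwanted high bits out and back in. */
    static uint64_t bextr64_d_ref(uint64_t val, uint64_t numskipbits,
                                  uint64_t numlowbits) {
        uint64_t shifted     = val >> numskipbits;
        uint64_t numhighbits = 64 - numlowbits;
        uint64_t cleared     = shifted << numhighbits;   /* drop the high bits      */
        return cleared >> numhighbits;                   /* keep only the low field */
    }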
+ +define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_d0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: rsb.w r3, r12, #64 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: rsb.w lr, r12, #32 +; CHECK-NEXT: rsb.w r12, r3, #32 +; CHECK-NEXT: lsls r1, r3 +; CHECK-NEXT: cmp.w lr, #0 +; CHECK-NEXT: lsr.w r4, r0, r12 +; CHECK-NEXT: orr.w r1, r1, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r0, lr +; CHECK-NEXT: lsl.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r2, r1, r12 +; CHECK-NEXT: lsr.w r0, r0, r3 +; CHECK-NEXT: orr.w r0, r0, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, lr +; CHECK-NEXT: lsr.w r1, r1, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: rsb.w r12, r3, #32 +; V7M-NEXT: lsls r1, r3 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r1, r1, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, lr +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r2, r1, r12 +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, lr +; V7M-NEXT: lsr.w r1, r1, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsr r1, r2, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: rsb.w r3, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; 
V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_d1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: uxtb.w lr, r2 +; CHECK-NEXT: subs.w r2, lr, #32 +; CHECK-NEXT: lsr.w r12, r0, lr +; CHECK-NEXT: rsb.w r0, lr, #32 +; CHECK-NEXT: lsl.w r0, r1, r0 +; CHECK-NEXT: orr.w r0, r0, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: rsb.w r2, r3, #64 +; CHECK-NEXT: lsr.w r1, r1, lr +; CHECK-NEXT: uxtb r2, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: rsb.w r12, r2, #32 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: sub.w r3, r2, #32 +; CHECK-NEXT: lsr.w r4, r0, r12 +; CHECK-NEXT: orrs r1, r4 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r0, r3 +; CHECK-NEXT: lsl.w r0, r0, r2 +; CHECK-NEXT: lsl.w r4, r1, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsr.w r0, r0, r2 +; CHECK-NEXT: orr.w r0, r0, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: uxtb.w lr, r2 +; V7M-NEXT: subs.w r2, lr, #32 +; V7M-NEXT: lsr.w r12, r0, lr +; V7M-NEXT: rsb.w r0, lr, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: rsb.w r2, r3, #64 +; V7M-NEXT: lsr.w r1, r1, lr +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w r12, r2, #32 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: sub.w r3, r2, #32 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orrs r1, r4 +; V7M-NEXT: cmp r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r3 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: lsl.w r4, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: lsr lr, r0, r12 +; V7A-NEXT: rsb r0, r12, #32 +; V7A-NEXT: orr r0, lr, r1, lsl r0 +; V7A-NEXT: mvn lr, #31 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r2, r3, #64 +; V7A-NEXT: lsr r1, r1, r12 +; V7A-NEXT: uxtb r3, r2 +; V7A-NEXT: rsb r4, r3, #32 +; 
V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: lsr r5, r0, r4 +; V7A-NEXT: orr r1, r5, r1, lsl r3 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r1, lsl r4 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, r3 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r6, r7, lr} +; V7A-T-NEXT: push {r4, r5, r6, r7, lr} +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: rsb.w r6, r12, #32 +; V7A-T-NEXT: rsb.w r3, r3, #64 +; V7A-T-NEXT: lsr.w r0, r0, r12 +; V7A-T-NEXT: mvn r7, #31 +; V7A-T-NEXT: uxtab r2, r7, r2 +; V7A-T-NEXT: lsl.w r6, r1, r6 +; V7A-T-NEXT: lsr.w lr, r1, r12 +; V7A-T-NEXT: orrs r0, r6 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w lr, #0 +; V7A-T-NEXT: uxtb r5, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r5, #32 +; V7A-T-NEXT: uxtab r3, r7, r3 +; V7A-T-NEXT: lsl.w r4, lr, r5 +; V7A-T-NEXT: lsr.w r2, r0, r1 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: orr.w r2, r2, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r3 +; V7A-T-NEXT: lsl.w r0, r0, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: lsr.w r0, r0, r5 +; V7A-T-NEXT: orr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r2, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, r5, r6, r7, pc} +; +; V6M-LABEL: bextr64_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r3 +; V6M-NEXT: uxtb r2, r2 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r2, r2, r4 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %shifted, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_d2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: lsl.w r1, r3, r1 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: rsb.w lr, r12, #32 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: subs.w r1, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r3, r1 +; CHECK-NEXT: rsb.w r1, r12, #64 +; CHECK-NEXT: lsr.w r2, r3, r2 +; CHECK-NEXT: rsb.w r12, r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: cmp.w lr, #0 +; CHECK-NEXT: lsl.w r2, r2, r1 +; CHECK-NEXT: lsr.w r4, r0, r12 +; CHECK-NEXT: orr.w r2, r2, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r0, lr +; CHECK-NEXT: lsl.w r0, r0, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r3, r2, r12 +; CHECK-NEXT: lsr.w r0, r0, r1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r2, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; 
V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: rsb.w r1, r12, #64 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: rsb.w r12, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsl.w r2, r2, r1 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r2, r2, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r0, lr +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsr r1, r2, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r2, r3, r2 +; V7A-T-NEXT: rsb.w r1, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: rsb.w lr, r1, #32 +; V7A-T-NEXT: rsbs.w r3, r12, #32 +; V7A-T-NEXT: lsl.w r2, r2, r1 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r2, r2, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r3 +; V7A-T-NEXT: lsl.w r0, r0, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r2, lr +; V7A-T-NEXT: lsr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r2, r1 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; CHECK-LABEL: 
bextr64_d3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: ldrd r0, lr, [r0] +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: rsb.w r2, r2, #64 +; CHECK-NEXT: subs.w r3, r1, #32 +; CHECK-NEXT: lsr.w r12, r0, r1 +; CHECK-NEXT: rsb.w r0, r1, #32 +; CHECK-NEXT: lsr.w r1, lr, r1 +; CHECK-NEXT: uxtb r2, r2 +; CHECK-NEXT: lsl.w r0, lr, r0 +; CHECK-NEXT: orr.w r0, r0, r12 +; CHECK-NEXT: rsb.w r12, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, lr, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: lsls r1, r2 +; CHECK-NEXT: sub.w r3, r2, #32 +; CHECK-NEXT: lsr.w r4, r0, r12 +; CHECK-NEXT: orrs r1, r4 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r0, r3 +; CHECK-NEXT: lsl.w r0, r0, r2 +; CHECK-NEXT: lsl.w r4, r1, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsr.w r0, r0, r2 +; CHECK-NEXT: orr.w r0, r0, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldrd r0, lr, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: lsr.w r12, r0, r1 +; V7M-NEXT: rsb.w r0, r1, #32 +; V7M-NEXT: lsr.w r1, lr, r1 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: lsl.w r0, lr, r0 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: rsb.w r12, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, lr, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: sub.w r3, r2, #32 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orrs r1, r4 +; V7M-NEXT: cmp r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r3 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: lsl.w r4, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: lsr r12, r4, r0 +; V7A-NEXT: rsb r4, r0, #32 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: orr r4, r12, r3, lsl r4 +; V7A-NEXT: mvn r12, #31 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsrpl r4, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: rsb lr, r2, #32 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: lsr r5, r4, lr +; V7A-NEXT: orr r3, r5, r0, lsl r2 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsl r0, r4, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lslpl r3, r4, r1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r3, lsl lr +; V7A-NEXT: lsrpl r0, r3, r1 +; V7A-NEXT: lsr r1, r3, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r6, lr} +; V7A-T-NEXT: push {r4, r5, r6, lr} +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: rsb.w r6, r0, #32 +; V7A-T-NEXT: lsr.w r3, lr, r0 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mvn r4, #31 +; V7A-T-NEXT: lsr.w r0, r12, r0 +; V7A-T-NEXT: uxtab r1, r4, r1 +; V7A-T-NEXT: lsl.w r6, lr, r6 +; V7A-T-NEXT: orrs r0, r6 +; 
V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: uxtb r5, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, lr, r1 +; V7A-T-NEXT: rsb.w r1, r5, #32 +; V7A-T-NEXT: lsls r3, r5 +; V7A-T-NEXT: uxtab r2, r4, r2 +; V7A-T-NEXT: lsr.w r6, r0, r1 +; V7A-T-NEXT: orrs r3, r6 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r5 +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsr.w r0, r0, r5 +; V7A-T-NEXT: orr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r3, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, r5, r6, pc} +; +; V6M-LABEL: bextr64_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r2 +; V6M-NEXT: ldr r5, [r0] +; V6M-NEXT: ldr r3, [r0, #4] +; V6M-NEXT: uxtb r2, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r2, r2, r4 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %shifted, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. +define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_d0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: ldr.w r12, [sp, #8] +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: subs.w r3, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r3 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: rsb.w r3, r12, #64 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: rsb.w lr, r12, #32 +; CHECK-NEXT: rsb.w r12, r3, #32 +; CHECK-NEXT: lsls r1, r3 +; CHECK-NEXT: cmp.w lr, #0 +; CHECK-NEXT: lsr.w r4, r0, r12 +; CHECK-NEXT: orr.w r1, r1, r4 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r0, lr +; CHECK-NEXT: lsl.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r2, r1, r12 +; CHECK-NEXT: lsr.w r0, r0, r3 +; CHECK-NEXT: orr.w r0, r0, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, lr +; CHECK-NEXT: pop {r4, pc} +; +; V7M-LABEL: bextr64_32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: rsb.w r12, r3, #32 +; V7M-NEXT: lsls r1, r3 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r1, r1, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, lr +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r2, r1, r12 +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r2 +; 
V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, lr +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: rsb.w r3, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
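(Editorial sketch for the bextr64_32_d1 variant that follows: the shift happens at 64 bits, the value is truncated, and pattern d is then applied at 32-bit width. Assumes 1 <= numlowbits <= 32 and numskipbits < 64; the name is illustrative, not taken from the test.)

    #include <stdint.h>

    static uint32_t bextr64_32_d1_ref(uint64_t val, uint64_t numskipbits,
                                      uint32_t numlowbits) {
        uint32_t truncshifted = (uint32_t)(val >> numskipbits); /* 64-bit shift, then truncate */
        uint32_t numhighbits  = 32 - numlowbits;
        return (truncshifted << numhighbits) >> numhighbits;    /* 32-bit pattern d */
    }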
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; CHECK-LABEL: bextr64_32_d1: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsrs r0, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: ldr r1, [sp] +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bextr64_32_d1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_d1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_d1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_d1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %truncshifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +; ---------------------------------------------------------------------------- ; +; Constant +; ---------------------------------------------------------------------------- ; + +; https://bugs.llvm.org/show_bug.cgi?id=38938 +define void @pr38938(ptr %a0, ptr %a1) nounwind { +; CHECK-LABEL: pr38938: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r1, [r1] +; CHECK-NEXT: ubfx r1, r1, #21, #10 +; CHECK-NEXT: ldr.w r2, [r0, r1, lsl #2] +; CHECK-NEXT: adds r2, #1 +; CHECK-NEXT: str.w r2, [r0, r1, lsl #2] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: pr38938: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r1, [r1] +; V7M-NEXT: ubfx r1, r1, #21, #10 +; V7M-NEXT: ldr.w r2, [r0, r1, lsl #2] +; V7M-NEXT: adds r2, #1 +; V7M-NEXT: str.w r2, [r0, r1, lsl #2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: pr38938: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r1, [r1] +; V7A-NEXT: ubfx r1, r1, #21, #10 +; V7A-NEXT: ldr r2, [r0, r1, lsl #2] +; V7A-NEXT: add r2, r2, #1 +; V7A-NEXT: str r2, [r0, r1, lsl #2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: pr38938: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r1, [r1] +; V7A-T-NEXT: ubfx r1, r1, #21, #10 +; V7A-T-NEXT: ldr.w r2, [r0, r1, lsl #2] +; V7A-T-NEXT: adds r2, #1 +; V7A-T-NEXT: str.w r2, [r0, r1, lsl #2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: pr38938: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r1] +; V6M-NEXT: lsrs r1, r1, #19 +; V6M-NEXT: ldr r2, .LCPI51_0 +; V6M-NEXT: ands r2, r1 +; V6M-NEXT: ldr r1, [r0, r2] +; V6M-NEXT: adds r1, 
r1, #1 +; V6M-NEXT: str r1, [r0, r2] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI51_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp = load i64, ptr %a1, align 8 + %tmp1 = lshr i64 %tmp, 21 + %tmp2 = and i64 %tmp1, 1023 + %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2 + %tmp4 = load i32, ptr %tmp3, align 4 + %tmp5 = add nsw i32 %tmp4, 1 + store i32 %tmp5, ptr %tmp3, align 4 + ret void +} + +; The most canonical variant +define i32 @c0_i32(i32 %arg) nounwind { +; CHECK-LABEL: c0_i32: +; CHECK: @ %bb.0: +; CHECK-NEXT: ubfx r0, r0, #19, #10 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c0_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #10 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c0_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #10 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c0_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #10 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c0_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + ret i32 %tmp1 +} + +; Should be still fine, but the mask is shifted +define i32 @c1_i32(i32 %arg) nounwind { +; CHECK-LABEL: c1_i32: +; CHECK: @ %bb.0: +; CHECK-NEXT: movw r1, #4092 +; CHECK-NEXT: and.w r0, r1, r0, lsr #19 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c1_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r1, #4092 +; V7M-NEXT: and.w r0, r1, r0, lsr #19 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c1_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r1, #4092 +; V7A-NEXT: and r0, r1, r0, lsr #19 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c1_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r1, #4092 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #19 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c1_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r0, #19 +; V6M-NEXT: ldr r0, .LCPI53_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI53_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 4092 + ret i32 %tmp1 +} + +; Should be still fine, but the result is shifted left afterwards +define i32 @c2_i32(i32 %arg) nounwind { +; CHECK-LABEL: c2_i32: +; CHECK: @ %bb.0: +; CHECK-NEXT: movw r1, #4092 +; CHECK-NEXT: and.w r0, r1, r0, lsr #17 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c2_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r1, #4092 +; V7M-NEXT: and.w r0, r1, r0, lsr #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c2_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r1, #4092 +; V7A-NEXT: and r0, r1, r0, lsr #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c2_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r1, #4092 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c2_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r0, #17 +; V6M-NEXT: ldr r0, .LCPI54_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI54_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + %tmp2 = shl i32 %tmp1, 2 + ret i32 %tmp2 +} + +; The mask covers newly shifted-in bit +define i32 @c4_i32_bad(i32 %arg) nounwind { +; CHECK-LABEL: c4_i32_bad: +; CHECK: @ %bb.0: +; CHECK-NEXT: mvn r1, #1 +; CHECK-NEXT: and.w r0, r1, r0, lsr #19 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c4_i32_bad: +; V7M: @ %bb.0: +; V7M-NEXT: mvn r1, #1 +; V7M-NEXT: and.w r0, r1, r0, lsr #19 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c4_i32_bad: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r1, #1 +; V7A-NEXT: and r0, r1, r0, lsr #19 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c4_i32_bad: +; V7A-T: @ 
%bb.0: +; V7A-T-NEXT: mvn r1, #1 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #19 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c4_i32_bad: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r0, #20 +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 16382 + ret i32 %tmp1 +} + +; i64 + +; The most canonical variant +define i64 @c0_i64(i64 %arg) nounwind { +; CHECK-LABEL: c0_i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: ubfx r0, r1, #19, #10 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c0_i64: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r1, #19, #10 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c0_i64: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r1, #19, #10 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c0_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r1, #19, #10 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c0_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r1, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + ret i64 %tmp1 +} + +; Should be still fine, but the mask is shifted +define i64 @c1_i64(i64 %arg) nounwind { +; CHECK-LABEL: c1_i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: movw r0, #4092 +; CHECK-NEXT: and.w r0, r0, r1, lsr #19 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c1_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movw r0, #4092 +; V7M-NEXT: and.w r0, r0, r1, lsr #19 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c1_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: and r0, r0, r1, lsr #19 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c1_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r0, #4092 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #19 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c1_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r1, #19 +; V6M-NEXT: ldr r0, .LCPI57_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI57_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 4092 + ret i64 %tmp1 +} + +; Should be still fine, but the result is shifted left afterwards +define i64 @c2_i64(i64 %arg) nounwind { +; CHECK-LABEL: c2_i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: movw r0, #4092 +; CHECK-NEXT: and.w r0, r0, r1, lsr #17 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c2_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movw r0, #4092 +; V7M-NEXT: and.w r0, r0, r1, lsr #17 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c2_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: and r0, r0, r1, lsr #17 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c2_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r0, #4092 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #17 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c2_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r1, #17 +; V6M-NEXT: ldr r0, .LCPI58_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI58_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + %tmp2 = shl i64 %tmp1, 2 + ret i64 %tmp2 +} + +; The mask covers newly shifted-in bit +define i64 @c4_i64_bad(i64 %arg) nounwind { +; CHECK-LABEL: c4_i64_bad: +; CHECK: @ %bb.0: +; CHECK-NEXT: mvn r0, #1 +; CHECK-NEXT: and.w r0, r0, r1, lsr #19 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; 
V7M-LABEL: c4_i64_bad: +; V7M: @ %bb.0: +; V7M-NEXT: mvn r0, #1 +; V7M-NEXT: and.w r0, r0, r1, lsr #19 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c4_i64_bad: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r0, #1 +; V7A-NEXT: and r0, r0, r1, lsr #19 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c4_i64_bad: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mvn r0, #1 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #19 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c4_i64_bad: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1, #20 +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 16382 + ret i64 %tmp1 +} + +; ---------------------------------------------------------------------------- ; +; Constant, storing the result afterwards. +; ---------------------------------------------------------------------------- ; + +; i32 + +; The most canonical variant +define void @c5_i32(i32 %arg, ptr %ptr) nounwind { +; CHECK-LABEL: c5_i32: +; CHECK: @ %bb.0: +; CHECK-NEXT: ubfx r0, r0, #19, #10 +; CHECK-NEXT: str r0, [r1] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c5_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #10 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c5_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #10 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c5_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #10 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c5_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: str r0, [r1] +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + store i32 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the mask is shifted +define void @c6_i32(i32 %arg, ptr %ptr) nounwind { +; CHECK-LABEL: c6_i32: +; CHECK: @ %bb.0: +; CHECK-NEXT: ubfx r0, r0, #19, #12 +; CHECK-NEXT: str r0, [r1] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c6_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #12 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c6_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #12 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c6_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #12 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c6_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: lsrs r0, r0, #20 +; V6M-NEXT: str r0, [r1] +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 4095 + store i32 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the result is shifted left afterwards +define void @c7_i32(i32 %arg, ptr %ptr) nounwind { +; CHECK-LABEL: c7_i32: +; CHECK: @ %bb.0: +; CHECK-NEXT: movw r2, #4092 +; CHECK-NEXT: and.w r0, r2, r0, lsr #17 +; CHECK-NEXT: str r0, [r1] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c7_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r2, #4092 +; V7M-NEXT: and.w r0, r2, r0, lsr #17 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c7_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r2, #4092 +; V7A-NEXT: and r0, r2, r0, lsr #17 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c7_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r2, #4092 +; V7A-T-NEXT: and.w r0, r2, r0, lsr #17 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c7_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r0, #17 +; V6M-NEXT: ldr r2, .LCPI62_0 +; V6M-NEXT: ands r2, r0 +; V6M-NEXT: str r2, [r1] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; 
V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI62_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + %tmp2 = shl i32 %tmp1, 2 + store i32 %tmp2, ptr %ptr + ret void +} + +; i64 + +; The most canonical variant +define void @c5_i64(i64 %arg, ptr %ptr) nounwind { +; CHECK-LABEL: c5_i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: ubfx r1, r1, #19, #10 +; CHECK-NEXT: strd r1, r0, [r2] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c5_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: ubfx r1, r1, #19, #10 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c5_i64: +; V7A: @ %bb.0: +; V7A-NEXT: mov r0, #0 +; V7A-NEXT: str r0, [r2, #4] +; V7A-NEXT: ubfx r0, r1, #19, #10 +; V7A-NEXT: str r0, [r2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c5_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: ubfx r1, r1, #19, #10 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c5_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsls r1, r1, #3 +; V6M-NEXT: lsrs r1, r1, #22 +; V6M-NEXT: str r1, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + store i64 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the mask is shifted +define void @c6_i64(i64 %arg, ptr %ptr) nounwind { +; CHECK-LABEL: c6_i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: ubfx r1, r1, #19, #12 +; CHECK-NEXT: strd r1, r0, [r2] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c6_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: ubfx r1, r1, #19, #12 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c6_i64: +; V7A: @ %bb.0: +; V7A-NEXT: mov r0, #0 +; V7A-NEXT: str r0, [r2, #4] +; V7A-NEXT: ubfx r0, r1, #19, #12 +; V7A-NEXT: str r0, [r2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c6_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: ubfx r1, r1, #19, #12 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c6_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsls r1, r1, #1 +; V6M-NEXT: lsrs r1, r1, #20 +; V6M-NEXT: str r1, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 4095 + store i64 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the result is shifted left afterwards +define void @c7_i64(i64 %arg, ptr %ptr) nounwind { +; CHECK-LABEL: c7_i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: movw r3, #4092 +; CHECK-NEXT: and.w r1, r3, r1, lsr #17 +; CHECK-NEXT: strd r1, r0, [r2] +; CHECK-NEXT: bx lr +; +; V7M-LABEL: c7_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: movw r3, #4092 +; V7M-NEXT: and.w r1, r3, r1, lsr #17 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c7_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: mov r3, #0 +; V7A-NEXT: and r0, r0, r1, lsr #17 +; V7A-NEXT: stm r2, {r0, r3} +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c7_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: movw r3, #4092 +; V7A-T-NEXT: and.w r1, r3, r1, lsr #17 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c7_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsrs r1, r1, #17 +; V6M-NEXT: ldr r3, .LCPI65_0 +; V6M-NEXT: ands r3, r1 +; V6M-NEXT: str r3, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI65_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = 
and i64 %tmp0, 1023 + %tmp2 = shl i64 %tmp1, 2 + store i64 %tmp2, ptr %ptr + ret void +} diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll new file mode 100644 index 0000000000000..96afc6302ea8a --- /dev/null +++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll @@ -0,0 +1,3357 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +; *Please* keep in sync with test/CodeGen/X86/extract-lowbits.ll + +; https://bugs.llvm.org/show_bug.cgi?id=36419 +; https://bugs.llvm.org/show_bug.cgi?id=37603 +; https://bugs.llvm.org/show_bug.cgi?id=37610 + +; Patterns: +; a) x & (1 << nbits) - 1 +; b) x & ~(-1 << nbits) +; c) x & (-1 >> (32 - y)) +; d) x << (32 - y) >> (32 - y) +; are equivalent. + +; ---------------------------------------------------------------------------- ; +; Pattern a. 32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_a0: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_a1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind { +; CHECK-LABEL: 
bzhi32_a2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_a3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_a4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: movs r2, #1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: subs r1, #1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %onebit = shl i32 1, 
%numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_a0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: mov.w r12, #1 +; CHECK-NEXT: subs.w lr, r2, #32 +; CHECK-NEXT: lsl.w r2, r12, r2 +; CHECK-NEXT: lsr.w r3, r12, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r3, r12, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbc r3, r3, #0 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_a0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +; Check that we don't throw away the vreg_width-1 mask if not using shifts +define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_a0_masked: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: and r2, r2, #63 +; CHECK-NEXT: mov.w r12, #1 +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: subs.w lr, r2, #32 +; CHECK-NEXT: lsl.w r2, r12, r2 +; CHECK-NEXT: lsr.w r3, r12, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r3, r12, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbc r3, r3, #0 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_a0_masked: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: and r2, r2, #63 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: rsb.w r3, r2, #32 +; 
V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a0_masked: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: and r2, r2, #63 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a0_masked: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: and r2, r2, #63 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a0_masked: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #63 +; V6M-NEXT: ands r2, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %numlowbits.masked = and i64 %numlowbits, 63 + %onebit = shl i64 1, %numlowbits.masked + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_a1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: mov.w r12, #1 +; CHECK-NEXT: subs.w lr, r2, #32 +; CHECK-NEXT: lsl.w r2, r12, r2 +; CHECK-NEXT: lsr.w r3, r12, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r3, r12, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbc r3, r3, #0 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: 
bzhi64_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_a2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r2, #32 +; CHECK-NEXT: movs r3, #1 +; CHECK-NEXT: subs.w r12, r2, #32 +; CHECK-NEXT: lsl.w r2, r3, r2 +; CHECK-NEXT: lsr.w r1, r3, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r3, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: sbc r1, r1, #0 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: movs r3, #1 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: sbc r1, r1, #0 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r1, #1 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsr r0, r1, r0 +; V7A-NEXT: lslpl r0, r1, r4 +; V7A-NEXT: lsl r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: subs r2, r1, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: subs.w r0, r2, #32 +; V7A-T-NEXT: lsr.w r3, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r1, r0 +; V7A-T-NEXT: lsl.w r0, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r1, r3, #0 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r5, #0 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r2, r0, #1 +; V6M-NEXT: sbcs r1, r5 +; V6M-NEXT: ldm r4!, {r0, r3} +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: pop {r4, 
r5, r7, pc} + %val = load i64, ptr %w + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_a3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r2, r1, #32 +; CHECK-NEXT: movs r3, #1 +; CHECK-NEXT: subs.w r12, r1, #32 +; CHECK-NEXT: lsl.w r1, r3, r1 +; CHECK-NEXT: lsr.w r2, r3, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r3, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: subs r3, r1, #1 +; CHECK-NEXT: sbc r1, r2, #0 +; CHECK-NEXT: ldrd r0, r2, [r0] +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: ands r0, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r1, #32 +; V7M-NEXT: movs r3, #1 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: subs r3, r1, #1 +; V7M-NEXT: sbc r1, r2, #0 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: lsl r1, r2, r1 +; V7A-NEXT: lsr r0, r2, r0 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lslpl r0, r2, r4 +; V7A-NEXT: subs r2, r1, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r1, #32 +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: subs.w r0, r1, #32 +; V7A-T-NEXT: lsr.w r3, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r2, r0 +; V7A-T-NEXT: lsl.w r0, r2, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r1, r3, #0 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r5, #0 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r2, r0, #1 +; V6M-NEXT: sbcs r1, r5 +; V6M-NEXT: ldm r4!, {r0, r3} +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_a4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: mov.w r12, #1 +; CHECK-NEXT: subs.w lr, r2, #32 +; CHECK-NEXT: lsl.w r2, r12, r2 +; CHECK-NEXT: lsr.w r3, r12, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r3, r12, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: subs r2, #1 +; CHECK-NEXT: sbc r3, r3, #0 +; CHECK-NEXT: ands r0, r2 +; CHECK-NEXT: ands r1, r3 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: 
bzhi64_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r0, r2 +; V7A-NEXT: and r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern b. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_b0: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_b1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_b2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_b3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w 
r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_b4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsl.w r1, r2, r1 +; CHECK-NEXT: bics r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_b0: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsl.w r12, r3, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl.w r12, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl r3, r2 +; CHECK-NEXT: bic.w r0, r0, r12 +; CHECK-NEXT: bics r1, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b0: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %notmask = shl i64 -1, 
%numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_b1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsl.w r12, r3, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl.w r12, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl r3, r2 +; CHECK-NEXT: bic.w r0, r0, r12 +; CHECK-NEXT: bics r1, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_b2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r1, #-1 +; CHECK-NEXT: subs.w r12, r2, #32 +; CHECK-NEXT: lsl.w r3, r1, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: ldrd r0, r2, [r0] +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r1, r12 +; CHECK-NEXT: bics r0, r3 +; CHECK-NEXT: bic.w r1, r2, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r1, #-1 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsl.w r3, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r1, r12 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: bic.w r1, r2, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: subs r0, r2, #32 +; V7A-NEXT: lsl r2, r1, r2 +; V7A-NEXT: lslpl r1, r1, r0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r3, r1 +; V7A-NEXT: bic r0, r4, r2 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bzhi64_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: ldrd r0, r12, [r0] +; V7A-T-NEXT: lsl.w r3, r1, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r1, r2 +; V7A-T-NEXT: bics r0, r3 +; V7A-T-NEXT: bic.w r1, r12, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save 
{r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: bics r2, r0 +; V6M-NEXT: bics r3, r1 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_b3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: subs.w r12, r1, #32 +; CHECK-NEXT: lsl.w r3, r2, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r3, #0 +; CHECK-NEXT: ldrd r0, r1, [r0] +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r2, r12 +; CHECK-NEXT: bics r1, r2 +; CHECK-NEXT: bics r0, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: lsl.w r3, r2, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, r12 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: subs r0, r1, #32 +; V7A-NEXT: lsl r4, r2, r1 +; V7A-NEXT: lslpl r2, r2, r0 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: bic r1, r3, r2 +; V7A-NEXT: bic r0, r6, r4 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: ldrd r0, r12, [r0] +; V7A-T-NEXT: lsl.w r3, r2, r1 +; V7A-T-NEXT: subs r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r2, r1 +; V7A-T-NEXT: bics r0, r3 +; V7A-T-NEXT: bic.w r1, r12, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: bics r2, r0 +; V6M-NEXT: bics r3, r1 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_b4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsl.w r12, r3, r2 +; CHECK-NEXT: subs r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl.w r12, #0 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl r3, r2 +; CHECK-NEXT: bic.w r0, r0, r12 +; CHECK-NEXT: bics r1, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl 
r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern c. 32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_c0: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_c1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_c2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: ldr r0, [r0] 
+; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_c3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_c4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: ands r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_c0(i64 
%val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_c0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsbs.w lr, r2, #32 +; CHECK-NEXT: rsb.w r2, r2, #64 +; CHECK-NEXT: mov.w r12, #-1 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsr.w r2, r12, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r3, r3, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: ands r0, r3 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_c0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsbs.w lr, r2, #32 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_c0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsbs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #64 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r2, r12, r2 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: and r1, r2, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w lr, r2, #32 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsr.w r2, r12, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_c1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r2, r2, #64 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: uxtb r2, r2 +; CHECK-NEXT: subs.w r12, r2, #32 +; CHECK-NEXT: lsr.w r2, r3, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r3, r3, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: ands r0, r3 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: mvn r2, #31 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: uxtb r12, lr +; V7A-NEXT: uxtab r2, r2, lr +; V7A-NEXT: lsr r12, r3, r12 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: movwpl r12, #0 +; V7A-NEXT: lsrpl r3, r3, r2 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r11, pc} +; +; 
V7A-T-LABEL: bzhi64_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w lr, r2, #64 +; V7A-T-NEXT: mvn r2, #31 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: uxtb.w r12, lr +; V7A-T-NEXT: uxtab r2, r2, lr +; V7A-T-NEXT: lsr.w r12, r3, r12 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r2 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_c2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsbs.w r1, r2, #32 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: rsb.w r2, r2, #64 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl r3, r1 +; CHECK-NEXT: ldrd r0, r1, [r0] +; CHECK-NEXT: mov.w r12, #-1 +; CHECK-NEXT: lsr.w r2, r12, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: ands r0, r3 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: rsbs.w r1, r2, #32 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r3, r1 +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, lr} +; V7A-NEXT: push {r5, lr} +; V7A-NEXT: rsbs r1, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: ldm r0, {r0, r5} +; V7A-NEXT: lsrpl r3, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: lsr r1, r12, r1 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r1, r1, r5 +; V7A-NEXT: pop {r5, pc} +; +; V7A-T-LABEL: bzhi64_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w r1, r2, #32 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: ldrd r0, lr, [r0] +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r1 +; V7A-T-NEXT: rsb.w r1, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r1, r12, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; CHECK-LABEL: 
bzhi64_c3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #64 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: subs.w r2, r1, #32 +; CHECK-NEXT: lsr.w r1, r3, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl r3, r2 +; CHECK-NEXT: ldrd r0, r2, [r0] +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: ands r0, r3 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #64 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: subs.w r2, r1, #32 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r3, r2 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: rsb r1, r1, #64 +; V7A-NEXT: mvn r4, #31 +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: uxtab r4, r4, r1 +; V7A-NEXT: lsr r0, r2, r0 +; V7A-NEXT: cmp r4, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: lsrpl r2, r2, r4 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r1, r1, #64 +; V7A-T-NEXT: mvn r3, #31 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: uxtab r3, r3, r1 +; V7A-T-NEXT: lsr.w r0, r2, r0 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: and.w r1, r0, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r2, r3 +; V7A-T-NEXT: and.w r0, r2, r12 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r1 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_c4_commutative: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsbs.w lr, r2, #32 +; CHECK-NEXT: rsb.w r2, r2, #64 +; CHECK-NEXT: mov.w r12, #-1 +; CHECK-NEXT: mov.w r3, #-1 +; CHECK-NEXT: lsr.w r2, r12, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r3, r3, lr +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r2, #0 +; CHECK-NEXT: ands r0, r3 +; CHECK-NEXT: ands r1, r2 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsbs.w lr, r2, #32 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} 
+; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsbs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #64 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r2, r12, r2 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r0, r0, r3 +; V7A-NEXT: and r1, r1, r2 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w lr, r2, #32 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsr.w r2, r12, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern d. 32-bit. +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_d0: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %val, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_d1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + 
%numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %val, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_d2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %val, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi32_d3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #32 +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: lsls r0, r1 +; CHECK-NEXT: lsrs r0, r1 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %val, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +; 64-bit. 
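+; These i64 variants exercise the same clear-high-bits shl/lshr pattern on
+; values wider than a 32-bit GPR; on Thumb1 (V6M) the variable shifts lower
+; to the __aeabi_llsl/__aeabi_llsr libcalls checked below.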
+ +define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_d0: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r3, r2, #64 +; CHECK-NEXT: rsbs.w r2, r2, #32 +; CHECK-NEXT: rsb.w lr, r3, #32 +; CHECK-NEXT: lsl.w r12, r1, r3 +; CHECK-NEXT: lsr.w r1, r0, lr +; CHECK-NEXT: orr.w r1, r1, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r0, r2 +; CHECK-NEXT: lsl.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r12, r1, lr +; CHECK-NEXT: lsr.w r0, r0, r3 +; CHECK-NEXT: orr.w r0, r0, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r2 +; CHECK-NEXT: lsr.w r1, r1, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #64 +; V7M-NEXT: rsbs.w r2, r2, #32 +; V7M-NEXT: rsb.w lr, r3, #32 +; V7M-NEXT: lsl.w r12, r1, r3 +; V7M-NEXT: lsr.w r1, r0, lr +; V7M-NEXT: orr.w r1, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r2 +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r12, r1, lr +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: lsr.w r1, r1, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: rsbs r2, r2, #32 +; V7A-NEXT: rsb r12, lr, #32 +; V7A-NEXT: lsr r3, r0, r12 +; V7A-NEXT: orr r1, r3, r1, lsl lr +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, lr +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, lr +; V7A-NEXT: orr r0, r0, r1, lsl r12 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #64 +; V7A-T-NEXT: rsbs.w r2, r2, #32 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r12, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %val, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_d1_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r2, r2, #64 +; CHECK-NEXT: uxtb r2, r2 +; CHECK-NEXT: rsb.w r3, r2, #32 +; CHECK-NEXT: lsl.w r12, r1, r2 +; CHECK-NEXT: lsr.w r1, r0, r3 +; CHECK-NEXT: orr.w r1, r1, r12 +; CHECK-NEXT: subs.w r12, r2, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r1, r0, r12 +; CHECK-NEXT: lsl.w r0, r0, r2 +; CHECK-NEXT: it pl 
+; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r3, r1, r3 +; CHECK-NEXT: lsr.w r0, r0, r2 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r1, r12 +; CHECK-NEXT: lsr.w r1, r1, r2 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsl.w r12, r1, r2 +; V7M-NEXT: lsr.w r1, r0, r3 +; V7M-NEXT: orr.w r1, r1, r12 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r12 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r12 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: uxtb r3, lr +; V7A-NEXT: rsb r12, r3, #32 +; V7A-NEXT: lsr r2, r0, r12 +; V7A-NEXT: orr r1, r2, r1, lsl r3 +; V7A-NEXT: mvn r2, #31 +; V7A-NEXT: uxtab r2, r2, lr +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r1, lsl r12 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, r3 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r2, #64 +; V7A-T-NEXT: mvn r2, #31 +; V7A-T-NEXT: uxtb r3, r4 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: uxtab r2, r2, r4 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bzhi64_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %val, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_d2_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: rsb.w r1, r2, #64 +; CHECK-NEXT: ldrd r0, r3, [r0] +; CHECK-NEXT: rsb.w lr, r1, #32 +; CHECK-NEXT: rsbs.w r2, r2, #32 +; CHECK-NEXT: lsl.w r12, r3, r1 +; CHECK-NEXT: lsr.w r3, r0, lr +; CHECK-NEXT: orr.w r3, r3, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r3, r0, r2 +; CHECK-NEXT: lsl.w r0, r0, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r12, r3, lr +; CHECK-NEXT: lsr.w r0, r0, r1 +; CHECK-NEXT: lsr.w r1, r3, r1 +; CHECK-NEXT: orr.w r0, r0, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r3, r2 +; 
CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: pop {r7, pc} +; +; V7M-LABEL: bzhi64_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r1, r2, #64 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w lr, r1, #32 +; V7M-NEXT: rsbs.w r2, r2, #32 +; V7M-NEXT: lsl.w r12, r3, r1 +; V7M-NEXT: lsr.w r3, r0, lr +; V7M-NEXT: orr.w r3, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r0, r2 +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r12, r3, lr +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, r7, r11, lr} +; V7A-NEXT: push {r5, r7, r11, lr} +; V7A-NEXT: rsb r3, r2, #64 +; V7A-NEXT: ldm r0, {r0, r7} +; V7A-NEXT: rsb r1, r3, #32 +; V7A-NEXT: rsbs r2, r2, #32 +; V7A-NEXT: lsr r5, r0, r1 +; V7A-NEXT: orr r7, r5, r7, lsl r3 +; V7A-NEXT: lslpl r7, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r7, lsl r1 +; V7A-NEXT: lsr r1, r7, r3 +; V7A-NEXT: lsrpl r0, r7, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r5, r7, r11, pc} +; +; V7A-T-LABEL: bzhi64_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #64 +; V7A-T-NEXT: ldrd r0, r1, [r0] +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: rsbs.w r2, r2, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r12, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r4, r1, r2 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %val, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; CHECK-LABEL: bzhi64_d3_load_indexzext: +; CHECK: @ %bb.0: +; CHECK-NEXT: rsb.w r1, r1, #64 +; CHECK-NEXT: ldrd r0, r2, [r0] +; CHECK-NEXT: uxtb r1, r1 +; CHECK-NEXT: rsb.w r3, r1, #32 +; CHECK-NEXT: lsl.w r12, r2, r1 +; CHECK-NEXT: lsr.w r2, r0, r3 +; CHECK-NEXT: orr.w r2, r2, r12 +; CHECK-NEXT: subs.w r12, r1, #32 +; CHECK-NEXT: it pl +; CHECK-NEXT: lslpl.w r2, r0, r12 +; CHECK-NEXT: lsl.w r0, r0, r1 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r0, #0 +; CHECK-NEXT: lsl.w r3, r2, r3 +; CHECK-NEXT: lsr.w r0, r0, r1 +; CHECK-NEXT: lsr.w r1, r2, r1 +; CHECK-NEXT: orr.w r0, r0, r3 +; CHECK-NEXT: it pl +; CHECK-NEXT: lsrpl.w r0, r2, r12 +; CHECK-NEXT: it pl +; CHECK-NEXT: movpl r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #64 +; V7M-NEXT: ldrd r0, r2, [r0] +; 
V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w r12, r2, r1 +; V7M-NEXT: lsr.w r2, r0, r3 +; V7M-NEXT: orr.w r2, r2, r12 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r0, r12 +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, r7, r11, lr} +; V7A-NEXT: push {r5, r7, r11, lr} +; V7A-NEXT: rsb r1, r1, #64 +; V7A-NEXT: ldm r0, {r0, r7} +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r5, r0, r3 +; V7A-NEXT: orr r7, r5, r7, lsl r2 +; V7A-NEXT: mvn r5, #31 +; V7A-NEXT: uxtab r1, r5, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lslpl r7, r0, r1 +; V7A-NEXT: lsl r0, r0, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r7, lsl r3 +; V7A-NEXT: lsrpl r0, r7, r1 +; V7A-NEXT: lsr r1, r7, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r5, r7, r11, pc} +; +; V7A-T-LABEL: bzhi64_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r1, #64 +; V7A-T-NEXT: ldrd r0, r2, [r0] +; V7A-T-NEXT: mvn r1, #31 +; V7A-T-NEXT: uxtb r3, r4 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r2, r3 +; V7A-T-NEXT: uxtab r1, r1, r4 +; V7A-T-NEXT: lsr.w r2, r0, lr +; V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: orr.w r2, r2, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r1 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r2, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r1 +; V7A-T-NEXT: lsr.w r1, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bzhi64_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r4, r1 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %val, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Constant mask +; ---------------------------------------------------------------------------- ; + +; 32-bit + +define i32 @bzhi32_constant_mask32(i32 %val) nounwind { +; CHECK-LABEL: bzhi32_constant_mask32: +; CHECK: @ %bb.0: +; CHECK-NEXT: bic r0, r0, #-2147483648 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_constant_mask32: +; V7M: @ %bb.0: +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask32: +; V7A: @ %bb.0: +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask32: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %masked = and 
i32 %val, 2147483647 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi32_constant_mask32_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: bic r0, r0, #-2147483648 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_constant_mask32_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask32_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask32_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask32_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %val1 = load i32, ptr %val + %masked = and i32 %val1, 2147483647 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask16(i32 %val) nounwind { +; CHECK-LABEL: bzhi32_constant_mask16: +; CHECK: @ %bb.0: +; CHECK-NEXT: bfc r0, #15, #17 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_constant_mask16: +; V7M: @ %bb.0: +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask16: +; V7A: @ %bb.0: +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask16: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask16: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, .LCPI41_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI41_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %masked = and i32 %val, 32767 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi32_constant_mask16_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: bfc r0, #15, #17 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_constant_mask16_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask16_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask16_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask16_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: ldr r0, .LCPI42_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI42_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %val1 = load i32, ptr %val + %masked = and i32 %val1, 32767 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask8(i32 %val) nounwind { +; CHECK-LABEL: bzhi32_constant_mask8: +; CHECK: @ %bb.0: +; CHECK-NEXT: and r0, r0, #127 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_constant_mask8: +; V7M: @ %bb.0: +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask8: +; V7A: @ %bb.0: +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask8: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask8: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %masked = and i32 %val, 127 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi32_constant_mask8_load: +; CHECK: @ 
%bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: and r0, r0, #127 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi32_constant_mask8_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask8_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask8_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask8_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: movs r0, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val1 = load i32, ptr %val + %masked = and i32 %val1, 127 + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_constant_mask64(i64 %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask64: +; CHECK: @ %bb.0: +; CHECK-NEXT: bic r1, r1, #-1073741824 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask64: +; V7M: @ %bb.0: +; V7M-NEXT: bic r1, r1, #-1073741824 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask64: +; V7A: @ %bb.0: +; V7A-NEXT: bic r1, r1, #-1073741824 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r1, r1, #-1073741824 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #3 +; V6M-NEXT: lsls r2, r2, #30 +; V6M-NEXT: bics r1, r2 +; V6M-NEXT: bx lr + %masked = and i64 %val, 4611686018427387903 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask64_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldrd r0, r1, [r0] +; CHECK-NEXT: bic r1, r1, #-1073741824 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask64_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: bic r1, r1, #-1073741824 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask64_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: bic r1, r1, #-1073741824 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask64_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldrd r0, r1, [r0] +; V7A-T-NEXT: bic r1, r1, #-1073741824 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask64_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #3 +; V6M-NEXT: lsls r3, r1, #30 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: bics r1, r3 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 4611686018427387903 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask32(i64 %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask32: +; CHECK: @ %bb.0: +; CHECK-NEXT: bic r0, r0, #-2147483648 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask32: +; V7M: @ %bb.0: +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask32: +; V7A: @ %bb.0: +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask32: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %masked = and i64 %val, 2147483647 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask32_load: +; CHECK: @ %bb.0: +; 
CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bic r0, r0, #-2147483648 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask32_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask32_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask32_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask32_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 2147483647 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask16(i64 %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask16: +; CHECK: @ %bb.0: +; CHECK-NEXT: bfc r0, #15, #17 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask16: +; V7M: @ %bb.0: +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask16: +; V7A: @ %bb.0: +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask16: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask16: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, .LCPI49_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI49_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %masked = and i64 %val, 32767 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask16_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bfc r0, #15, #17 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask16_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask16_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask16_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask16_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: ldr r0, .LCPI50_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI50_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %val1 = load i64, ptr %val + %masked = and i64 %val1, 32767 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask8(i64 %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask8: +; CHECK: @ %bb.0: +; CHECK-NEXT: and r0, r0, #127 +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask8: +; V7M: @ %bb.0: +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask8: +; V7A: @ %bb.0: +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask8: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: movs r1, #0 +; 
V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask8: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %masked = and i64 %val, 127 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind { +; CHECK-LABEL: bzhi64_constant_mask8_load: +; CHECK: @ %bb.0: +; CHECK-NEXT: ldr r0, [r0] +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: and r0, r0, #127 +; CHECK-NEXT: bx lr +; +; V7M-LABEL: bzhi64_constant_mask8_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask8_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask8_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask8_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: movs r0, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 127 + ret i64 %masked +} diff --git a/llvm/test/CodeGen/X86/and-mask-variable.ll b/llvm/test/CodeGen/X86/and-mask-variable.ll new file mode 100644 index 0000000000000..844a413391d75 --- /dev/null +++ b/llvm/test/CodeGen/X86/and-mask-variable.ll @@ -0,0 +1,356 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMINOTBM,X86-BMI1 +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMITBM,X86-BMI1 +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMITBM,X86-BMI2 +; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMINOTBM,X86-BMI2 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-NOBMI +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMINOTBM,X64-BMI1 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMITBM,X64-BMI1 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMITBM,X64-BMI2 +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMINOTBM,X64-BMI2 + +define i32 @mask_pair(i32 %x, i32 %y) { +; X86-NOBMI-LABEL: mask_pair: +; X86-NOBMI: # %bb.0: +; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NOBMI-NEXT: shrl %cl, %eax +; X86-NOBMI-NEXT: shll %cl, %eax +; X86-NOBMI-NEXT: retl +; +; X86-BMI1-LABEL: mask_pair: +; X86-BMI1: # %bb.0: +; X86-BMI1-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-BMI1-NEXT: shrl %cl, %eax +; X86-BMI1-NEXT: shll %cl, %eax +; X86-BMI1-NEXT: retl +; +; X86-BMI2-LABEL: mask_pair: +; X86-BMI2: # %bb.0: +; X86-BMI2-NEXT: movzbl {{[0-9]+}}(%esp), 
%eax +; X86-BMI2-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx +; X86-BMI2-NEXT: shlxl %eax, %ecx, %eax +; X86-BMI2-NEXT: retl +; +; X64-NOBMI-LABEL: mask_pair: +; X64-NOBMI: # %bb.0: +; X64-NOBMI-NEXT: movl %esi, %ecx +; X64-NOBMI-NEXT: movl %edi, %eax +; X64-NOBMI-NEXT: shrl %cl, %eax +; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx +; X64-NOBMI-NEXT: shll %cl, %eax +; X64-NOBMI-NEXT: retq +; +; X64-BMI1-LABEL: mask_pair: +; X64-BMI1: # %bb.0: +; X64-BMI1-NEXT: movl %esi, %ecx +; X64-BMI1-NEXT: movl %edi, %eax +; X64-BMI1-NEXT: shrl %cl, %eax +; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx +; X64-BMI1-NEXT: shll %cl, %eax +; X64-BMI1-NEXT: retq +; +; X64-BMI2-LABEL: mask_pair: +; X64-BMI2: # %bb.0: +; X64-BMI2-NEXT: shrxl %esi, %edi, %eax +; X64-BMI2-NEXT: shlxl %esi, %eax, %eax +; X64-BMI2-NEXT: retq + %shl = shl nsw i32 -1, %y + %and = and i32 %shl, %x + ret i32 %and +} + +define i64 @mask_pair_64(i64 %x, i64 %y) { +; X86-NOBMI-LABEL: mask_pair_64: +; X86-NOBMI: # %bb.0: +; X86-NOBMI-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; X86-NOBMI-NEXT: movl $-1, %edx +; X86-NOBMI-NEXT: movl $-1, %eax +; X86-NOBMI-NEXT: shll %cl, %eax +; X86-NOBMI-NEXT: testb $32, %cl +; X86-NOBMI-NEXT: je .LBB1_2 +; X86-NOBMI-NEXT: # %bb.1: +; X86-NOBMI-NEXT: movl %eax, %edx +; X86-NOBMI-NEXT: xorl %eax, %eax +; X86-NOBMI-NEXT: .LBB1_2: +; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edx +; X86-NOBMI-NEXT: retl +; +; X86-BMI1-LABEL: mask_pair_64: +; X86-BMI1: # %bb.0: +; X86-BMI1-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; X86-BMI1-NEXT: movl $-1, %edx +; X86-BMI1-NEXT: movl $-1, %eax +; X86-BMI1-NEXT: shll %cl, %eax +; X86-BMI1-NEXT: testb $32, %cl +; X86-BMI1-NEXT: je .LBB1_2 +; X86-BMI1-NEXT: # %bb.1: +; X86-BMI1-NEXT: movl %eax, %edx +; X86-BMI1-NEXT: xorl %eax, %eax +; X86-BMI1-NEXT: .LBB1_2: +; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edx +; X86-BMI1-NEXT: retl +; +; X86-BMI2-LABEL: mask_pair_64: +; X86-BMI2: # %bb.0: +; X86-BMI2-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; X86-BMI2-NEXT: movl $-1, %edx +; X86-BMI2-NEXT: shlxl %ecx, %edx, %eax +; X86-BMI2-NEXT: testb $32, %cl +; X86-BMI2-NEXT: je .LBB1_2 +; X86-BMI2-NEXT: # %bb.1: +; X86-BMI2-NEXT: movl %eax, %edx +; X86-BMI2-NEXT: xorl %eax, %eax +; X86-BMI2-NEXT: .LBB1_2: +; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx +; X86-BMI2-NEXT: retl +; +; X64-NOBMI-LABEL: mask_pair_64: +; X64-NOBMI: # %bb.0: +; X64-NOBMI-NEXT: movq %rsi, %rcx +; X64-NOBMI-NEXT: movq %rdi, %rax +; X64-NOBMI-NEXT: shrq %cl, %rax +; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $rcx +; X64-NOBMI-NEXT: shlq %cl, %rax +; X64-NOBMI-NEXT: retq +; +; X64-BMI1-LABEL: mask_pair_64: +; X64-BMI1: # %bb.0: +; X64-BMI1-NEXT: movq %rsi, %rcx +; X64-BMI1-NEXT: movq %rdi, %rax +; X64-BMI1-NEXT: shrq %cl, %rax +; X64-BMI1-NEXT: # kill: def $cl killed $cl killed $rcx +; X64-BMI1-NEXT: shlq %cl, %rax +; X64-BMI1-NEXT: retq +; +; X64-BMI2-LABEL: mask_pair_64: +; X64-BMI2: # %bb.0: +; X64-BMI2-NEXT: shrxq %rsi, %rdi, %rax +; X64-BMI2-NEXT: shlxq %rsi, %rax, %rax +; X64-BMI2-NEXT: retq + %shl = shl nsw i64 -1, %y + %and = and i64 %shl, %x + ret i64 %and +} + +define i128 @mask_pair_128(i128 %x, i128 %y) { +; X86-NOBMI-LABEL: mask_pair_128: +; X86-NOBMI: # %bb.0: +; X86-NOBMI-NEXT: pushl %ebx +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8 +; X86-NOBMI-NEXT: pushl %edi +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12 +; X86-NOBMI-NEXT: pushl %esi +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16 
+; X86-NOBMI-NEXT: subl $32, %esp +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 48 +; X86-NOBMI-NEXT: .cfi_offset %esi, -16 +; X86-NOBMI-NEXT: .cfi_offset %edi, -12 +; X86-NOBMI-NEXT: .cfi_offset %ebx, -8 +; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-NOBMI-NEXT: movl $0, (%esp) +; X86-NOBMI-NEXT: movl %ecx, %edx +; X86-NOBMI-NEXT: shrb $3, %dl +; X86-NOBMI-NEXT: andb $12, %dl +; X86-NOBMI-NEXT: negb %dl +; X86-NOBMI-NEXT: movsbl %dl, %ebx +; X86-NOBMI-NEXT: movl 24(%esp,%ebx), %edx +; X86-NOBMI-NEXT: movl 28(%esp,%ebx), %esi +; X86-NOBMI-NEXT: shldl %cl, %edx, %esi +; X86-NOBMI-NEXT: movl 16(%esp,%ebx), %edi +; X86-NOBMI-NEXT: movl 20(%esp,%ebx), %ebx +; X86-NOBMI-NEXT: shldl %cl, %ebx, %edx +; X86-NOBMI-NEXT: shldl %cl, %edi, %ebx +; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx +; X86-NOBMI-NEXT: shll %cl, %edi +; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edx +; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi +; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edi +; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %ebx +; X86-NOBMI-NEXT: movl %esi, 12(%eax) +; X86-NOBMI-NEXT: movl %edx, 8(%eax) +; X86-NOBMI-NEXT: movl %ebx, 4(%eax) +; X86-NOBMI-NEXT: movl %edi, (%eax) +; X86-NOBMI-NEXT: addl $32, %esp +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 16 +; X86-NOBMI-NEXT: popl %esi +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 12 +; X86-NOBMI-NEXT: popl %edi +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 8 +; X86-NOBMI-NEXT: popl %ebx +; X86-NOBMI-NEXT: .cfi_def_cfa_offset 4 +; X86-NOBMI-NEXT: retl $4 +; +; X86-BMI1-LABEL: mask_pair_128: +; X86-BMI1: # %bb.0: +; X86-BMI1-NEXT: pushl %ebx +; X86-BMI1-NEXT: .cfi_def_cfa_offset 8 +; X86-BMI1-NEXT: pushl %edi +; X86-BMI1-NEXT: .cfi_def_cfa_offset 12 +; X86-BMI1-NEXT: pushl %esi +; X86-BMI1-NEXT: .cfi_def_cfa_offset 16 +; X86-BMI1-NEXT: subl $32, %esp +; X86-BMI1-NEXT: .cfi_def_cfa_offset 48 +; X86-BMI1-NEXT: .cfi_offset %esi, -16 +; X86-BMI1-NEXT: .cfi_offset %edi, -12 +; X86-BMI1-NEXT: .cfi_offset %ebx, -8 +; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-BMI1-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-BMI1-NEXT: movl $0, (%esp) +; X86-BMI1-NEXT: movl %ecx, %edx +; X86-BMI1-NEXT: shrb $3, %dl +; X86-BMI1-NEXT: andb $12, %dl +; X86-BMI1-NEXT: negb %dl +; X86-BMI1-NEXT: movsbl %dl, %ebx +; X86-BMI1-NEXT: movl 24(%esp,%ebx), %edx +; X86-BMI1-NEXT: movl 28(%esp,%ebx), %esi +; X86-BMI1-NEXT: shldl %cl, %edx, %esi +; X86-BMI1-NEXT: movl 16(%esp,%ebx), %edi +; X86-BMI1-NEXT: movl 20(%esp,%ebx), %ebx +; X86-BMI1-NEXT: shldl %cl, %ebx, %edx +; X86-BMI1-NEXT: shldl %cl, %edi, %ebx +; X86-BMI1-NEXT: # kill: def $cl killed $cl killed $ecx +; X86-BMI1-NEXT: shll %cl, %edi +; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edx +; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %esi +; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %edi +; X86-BMI1-NEXT: andl {{[0-9]+}}(%esp), %ebx +; X86-BMI1-NEXT: movl %esi, 12(%eax) +; X86-BMI1-NEXT: movl %edx, 8(%eax) 
+; X86-BMI1-NEXT: movl %ebx, 4(%eax) +; X86-BMI1-NEXT: movl %edi, (%eax) +; X86-BMI1-NEXT: addl $32, %esp +; X86-BMI1-NEXT: .cfi_def_cfa_offset 16 +; X86-BMI1-NEXT: popl %esi +; X86-BMI1-NEXT: .cfi_def_cfa_offset 12 +; X86-BMI1-NEXT: popl %edi +; X86-BMI1-NEXT: .cfi_def_cfa_offset 8 +; X86-BMI1-NEXT: popl %ebx +; X86-BMI1-NEXT: .cfi_def_cfa_offset 4 +; X86-BMI1-NEXT: retl $4 +; +; X86-BMI2-LABEL: mask_pair_128: +; X86-BMI2: # %bb.0: +; X86-BMI2-NEXT: pushl %ebx +; X86-BMI2-NEXT: .cfi_def_cfa_offset 8 +; X86-BMI2-NEXT: pushl %edi +; X86-BMI2-NEXT: .cfi_def_cfa_offset 12 +; X86-BMI2-NEXT: pushl %esi +; X86-BMI2-NEXT: .cfi_def_cfa_offset 16 +; X86-BMI2-NEXT: subl $32, %esp +; X86-BMI2-NEXT: .cfi_def_cfa_offset 48 +; X86-BMI2-NEXT: .cfi_offset %esi, -16 +; X86-BMI2-NEXT: .cfi_offset %edi, -12 +; X86-BMI2-NEXT: .cfi_offset %ebx, -8 +; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-BMI2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $-1, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $0, {{[0-9]+}}(%esp) +; X86-BMI2-NEXT: movl $0, (%esp) +; X86-BMI2-NEXT: movl %ecx, %edx +; X86-BMI2-NEXT: shrb $3, %dl +; X86-BMI2-NEXT: andb $12, %dl +; X86-BMI2-NEXT: negb %dl +; X86-BMI2-NEXT: movsbl %dl, %edi +; X86-BMI2-NEXT: movl 24(%esp,%edi), %edx +; X86-BMI2-NEXT: movl 28(%esp,%edi), %esi +; X86-BMI2-NEXT: shldl %cl, %edx, %esi +; X86-BMI2-NEXT: movl 16(%esp,%edi), %ebx +; X86-BMI2-NEXT: movl 20(%esp,%edi), %edi +; X86-BMI2-NEXT: shldl %cl, %edi, %edx +; X86-BMI2-NEXT: shldl %cl, %ebx, %edi +; X86-BMI2-NEXT: shlxl %ecx, %ebx, %ecx +; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edx +; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi +; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %ecx +; X86-BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi +; X86-BMI2-NEXT: movl %esi, 12(%eax) +; X86-BMI2-NEXT: movl %edx, 8(%eax) +; X86-BMI2-NEXT: movl %edi, 4(%eax) +; X86-BMI2-NEXT: movl %ecx, (%eax) +; X86-BMI2-NEXT: addl $32, %esp +; X86-BMI2-NEXT: .cfi_def_cfa_offset 16 +; X86-BMI2-NEXT: popl %esi +; X86-BMI2-NEXT: .cfi_def_cfa_offset 12 +; X86-BMI2-NEXT: popl %edi +; X86-BMI2-NEXT: .cfi_def_cfa_offset 8 +; X86-BMI2-NEXT: popl %ebx +; X86-BMI2-NEXT: .cfi_def_cfa_offset 4 +; X86-BMI2-NEXT: retl $4 +; +; X64-NOBMI-LABEL: mask_pair_128: +; X64-NOBMI: # %bb.0: +; X64-NOBMI-NEXT: movq %rdx, %rcx +; X64-NOBMI-NEXT: movq $-1, %rdx +; X64-NOBMI-NEXT: movq $-1, %r8 +; X64-NOBMI-NEXT: shlq %cl, %r8 +; X64-NOBMI-NEXT: xorl %eax, %eax +; X64-NOBMI-NEXT: testb $64, %cl +; X64-NOBMI-NEXT: cmovneq %r8, %rdx +; X64-NOBMI-NEXT: cmoveq %r8, %rax +; X64-NOBMI-NEXT: andq %rdi, %rax +; X64-NOBMI-NEXT: andq %rsi, %rdx +; X64-NOBMI-NEXT: retq +; +; X64-BMI1-LABEL: mask_pair_128: +; X64-BMI1: # %bb.0: +; X64-BMI1-NEXT: movq %rdx, %rcx +; X64-BMI1-NEXT: movq $-1, %rdx +; X64-BMI1-NEXT: movq $-1, %r8 +; X64-BMI1-NEXT: shlq %cl, %r8 +; X64-BMI1-NEXT: xorl %eax, %eax +; X64-BMI1-NEXT: testb $64, %cl +; X64-BMI1-NEXT: cmovneq %r8, %rdx +; X64-BMI1-NEXT: cmoveq %r8, %rax +; X64-BMI1-NEXT: andq %rdi, %rax +; X64-BMI1-NEXT: andq %rsi, %rdx +; X64-BMI1-NEXT: retq +; +; X64-BMI2-LABEL: mask_pair_128: +; X64-BMI2: # %bb.0: +; X64-BMI2-NEXT: movq $-1, %rcx +; X64-BMI2-NEXT: shlxq %rdx, %rcx, %r8 +; X64-BMI2-NEXT: xorl %eax, %eax +; X64-BMI2-NEXT: testb $64, %dl +; X64-BMI2-NEXT: cmovneq %r8, %rcx +; X64-BMI2-NEXT: cmoveq %r8, %rax +; X64-BMI2-NEXT: andq 
%rdi, %rax +; X64-BMI2-NEXT: andq %rsi, %rcx +; X64-BMI2-NEXT: movq %rcx, %rdx +; X64-BMI2-NEXT: retq + %shl = shl nsw i128 -1, %y + %and = and i128 %shl, %x + ret i128 %and +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; X64: {{.*}} +; X64-BMINOTBM: {{.*}} +; X64-BMITBM: {{.*}} +; X86: {{.*}} +; X86-BMINOTBM: {{.*}} +; X86-BMITBM: {{.*}}