
Conversation

AZero13 (Contributor) commented Sep 11, 2025

For ARM, we want to do this for scalars up to 32 bits; otherwise the code ends up bigger and bloated.
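
For reference, a minimal standalone sketch (not part of the patch) of the scalar identity the fold relies on, assuming 32-bit unsigned values; the helper names mask_form and shift_pair_form are invented for this illustration:

// Hedged sketch: for 0 <= y < 32, masking with (-1 << y) is equivalent to
// shifting right then left by y, which is the two-shift sequence the
// mask_pair test in this patch expects on ARM/Thumb.
#include <cassert>
#include <cstdint>

static uint32_t mask_form(uint32_t x, uint32_t y) {
  return x & (~UINT32_C(0) << y); // materialize the mask, then AND
}

static uint32_t shift_pair_form(uint32_t x, uint32_t y) {
  return (x >> y) << y;           // clear the low y bits with two shifts
}

int main() {
  for (uint32_t y = 0; y < 32; ++y)
    assert(mask_form(0xDEADBEEFu, y) == shift_pair_form(0xDEADBEEFu, y));
  return 0;
}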

llvmbot (Member) commented Sep 11, 2025

@llvm/pr-subscribers-backend-arm

Author: AZero13 (AZero13)

Changes

For ARM, we want to do this for scalars up to 32 bits; otherwise the code ends up bigger and bloated.


Patch is 262.28 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/158070.diff

4 Files Affected:

  • (modified) llvm/lib/Target/ARM/ARMISelLowering.h (+10)
  • (added) llvm/test/CodeGen/ARM/and-mask-variable.ll (+90)
  • (added) llvm/test/CodeGen/ARM/extract-bits.ll (+5674)
  • (added) llvm/test/CodeGen/ARM/extract-lowbits.ll (+3357)
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 196ecb1b9f678..fa8cc9d21c5e5 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -773,6 +773,16 @@ class VectorType;
     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                            CombineLevel Level) const override;
 
+    /// Return true if it is profitable to fold a pair of shifts into a mask.
+    bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
+      EVT VT = Y.getValueType();
+
+      if (VT.isVector())
+        return false;
+
+      return VT.getScalarSizeInBits() <= 32;
+    }
+
     bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                               unsigned SelectOpcode, SDValue X,
                                               SDValue Y) const override;
diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll
new file mode 100644
index 0000000000000..0f84b76f97a6b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi  %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi  %s -o -   | FileCheck %s --check-prefix V6M
+
+define i32 @mask_pair(i32 %x, i32 %y) {
+; V7M-LABEL: mask_pair:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    lsls r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: mask_pair:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    lsr r0, r0, r1
+; V7A-NEXT:    lsl r0, r0, r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: mask_pair:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    lsls r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: mask_pair:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    lsls r0, r1
+; V6M-NEXT:    bx lr
+  %shl = shl nsw i32 -1, %y
+  %and = and i32 %shl, %x
+  ret i32 %and
+}
+
+define i64 @mask_pair_64(i64 %x, i64 %y) {
+; V7M-LABEL: mask_pair_64:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    mov.w r3, #-1
+; V7M-NEXT:    lsl.w r12, r3, r2
+; V7M-NEXT:    subs r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl.w r12, #0
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl r3, r2
+; V7M-NEXT:    and.w r0, r0, r12
+; V7M-NEXT:    ands r1, r3
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: mask_pair_64:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    subs r12, r2, #32
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    lsl r2, r3, r2
+; V7A-NEXT:    lslpl r3, r3, r12
+; V7A-NEXT:    movwpl r2, #0
+; V7A-NEXT:    and r1, r3, r1
+; V7A-NEXT:    and r0, r2, r0
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: mask_pair_64:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    mov.w r3, #-1
+; V7A-T-NEXT:    lsl.w r12, r3, r2
+; V7A-T-NEXT:    subs r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl.w r12, #0
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl r3, r2
+; V7A-T-NEXT:    and.w r0, r0, r12
+; V7A-T-NEXT:    ands r1, r3
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: mask_pair_64:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r7, lr}
+; V6M-NEXT:    push {r4, r5, r7, lr}
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    mov r5, r0
+; V6M-NEXT:    movs r0, #0
+; V6M-NEXT:    mvns r0, r0
+; V6M-NEXT:    mov r1, r0
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    pop {r4, r5, r7, pc}
+  %shl = shl nsw i64 -1, %y
+  %and = and i64 %shl, %x
+  ret i64 %and
+}
diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll
new file mode 100644
index 0000000000000..237c8bc7d2906
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/extract-bits.ll
@@ -0,0 +1,5674 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv7m-eabi  %s -o - | FileCheck %s --check-prefix V7M
+; RUN: llc -mtriple=armv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A
+; RUN: llc -mtriple=thumbv7a-eabi  %s -o -   | FileCheck %s --check-prefix V7A-T
+; RUN: llc -mtriple=armv6m-eabi  %s -o -   | FileCheck %s --check-prefix V6M
+
+; *Please* keep in sync with test/CodeGen/X86/extract-bits.ll
+
+; https://bugs.llvm.org/show_bug.cgi?id=36419
+; https://bugs.llvm.org/show_bug.cgi?id=37603
+; https://bugs.llvm.org/show_bug.cgi?id=37610
+
+; Patterns:
+;   a) (x >> start) &  (1 << nbits) - 1
+;   b) (x >> start) & ~(-1 << nbits)
+;   c) (x >> start) &  (-1 >> (32 - y))
+;   d) (x >> start) << (32 - y) >> (32 - y)
+; are equivalent.
+
+; ---------------------------------------------------------------------------- ;
+; Pattern a. 32-bit
+; ---------------------------------------------------------------------------- ;
+
+define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7M-LABEL: bextr32_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a0_arithmetic:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    asrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7M-LABEL: bextr32_a0_arithmetic:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    asrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a0_arithmetic:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, asr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a0_arithmetic:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    asrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a0_arithmetic:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    asrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = ashr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a1_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7M-LABEL: bextr32_a1_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a1_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a1_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a1_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a2_load:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7M-LABEL: bextr32_a2_load:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a2_load:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a2_load:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a2_load:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    lsrs r3, r1
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %shifted = lshr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a3_load_indexzext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7M-LABEL: bextr32_a3_load_indexzext:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    ldr r0, [r0]
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a3_load_indexzext:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ldr r0, [r0]
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a3_load_indexzext:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    ldr r0, [r0]
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a3_load_indexzext:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    ldr r3, [r0]
+; V6M-NEXT:    lsrs r3, r1
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    lsls r0, r2
+; V6M-NEXT:    subs r0, r0, #1
+; V6M-NEXT:    ands r0, r3
+; V6M-NEXT:    bx lr
+  %val = load i32, ptr %w
+  %skip = zext i8 %numskipbits to i32
+  %shifted = lshr i32 %val, %skip
+  %conv = zext i8 %numlowbits to i32
+  %onebit = shl i32 1, %conv
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %mask, %shifted
+  ret i32 %masked
+}
+
+define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
+; CHECK-LABEL: bextr32_a4_commutative:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    lsrs r0, r1
+; CHECK-NEXT:    movs r1, #1
+; CHECK-NEXT:    lsls r1, r2
+; CHECK-NEXT:    subs r1, #1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+;
+; V7M-LABEL: bextr32_a4_commutative:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    lsrs r0, r1
+; V7M-NEXT:    movs r1, #1
+; V7M-NEXT:    lsls r1, r2
+; V7M-NEXT:    subs r1, #1
+; V7M-NEXT:    ands r0, r1
+; V7M-NEXT:    bx lr
+;
+; V7A-LABEL: bextr32_a4_commutative:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    mov r12, #1
+; V7A-NEXT:    mvn r3, #0
+; V7A-NEXT:    add r2, r3, r12, lsl r2
+; V7A-NEXT:    and r0, r2, r0, lsr r1
+; V7A-NEXT:    bx lr
+;
+; V7A-T-LABEL: bextr32_a4_commutative:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    lsrs r0, r1
+; V7A-T-NEXT:    movs r1, #1
+; V7A-T-NEXT:    lsls r1, r2
+; V7A-T-NEXT:    subs r1, #1
+; V7A-T-NEXT:    ands r0, r1
+; V7A-T-NEXT:    bx lr
+;
+; V6M-LABEL: bextr32_a4_commutative:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    lsrs r0, r1
+; V6M-NEXT:    movs r1, #1
+; V6M-NEXT:    lsls r1, r2
+; V6M-NEXT:    subs r1, r1, #1
+; V6M-NEXT:    ands r0, r1
+; V6M-NEXT:    bx lr
+  %shifted = lshr i32 %val, %numskipbits
+  %onebit = shl i32 1, %numlowbits
+  %mask = add nsw i32 %onebit, -1
+  %masked = and i32 %shifted, %mask ; swapped order
+  ret i32 %masked
+}
+
+; 64-bit
+
+define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a0:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    rsb.w r4, r12, #32
+; CHECK-NEXT:    subs.w r3, r12, #32
+; CHECK-NEXT:    lsr.w r4, lr, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r4, lr, r3
+; CHECK-NEXT:    lsl.w r3, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs r3, #1
+; CHECK-NEXT:    sbc r12, r4, #0
+; CHECK-NEXT:    rsb.w r4, r2, #32
+; CHECK-NEXT:    lsl.w r4, r1, r4
+; CHECK-NEXT:    orrs r0, r4
+; CHECK-NEXT:    subs.w r4, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lsrpl.w r0, r1, r4
+; CHECK-NEXT:    lsr.w r1, r1, r2
+; CHECK-NEXT:    and.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r1, #0
+; CHECK-NEXT:    and.w r1, r1, r12
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7M-LABEL: bextr64_a0:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    rsb.w r4, r12, #32
+; V7M-NEXT:    subs.w r3, r12, #32
+; V7M-NEXT:    lsr.w r4, lr, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r4, lr, r3
+; V7M-NEXT:    lsl.w r3, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs r3, #1
+; V7M-NEXT:    sbc r12, r4, #0
+; V7M-NEXT:    rsb.w r4, r2, #32
+; V7M-NEXT:    lsl.w r4, r1, r4
+; V7M-NEXT:    orrs r0, r4
+; V7M-NEXT:    subs.w r4, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lsrpl.w r0, r1, r4
+; V7M-NEXT:    lsr.w r1, r1, r2
+; V7M-NEXT:    and.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r1, #0
+; V7M-NEXT:    and.w r1, r1, r12
+; V7M-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    .save {r4, lr}
+; V7A-NEXT:    push {r4, lr}
+; V7A-NEXT:    ldr r12, [sp, #8]
+; V7A-NEXT:    mov lr, #1
+; V7A-NEXT:    lsr r0, r0, r2
+; V7A-NEXT:    rsb r3, r12, #32
+; V7A-NEXT:    subs r4, r12, #32
+; V7A-NEXT:    lsr r3, lr, r3
+; V7A-NEXT:    lslpl r3, lr, r4
+; V7A-NEXT:    lsl r4, lr, r12
+; V7A-NEXT:    movwpl r4, #0
+; V7A-NEXT:    subs r4, r4, #1
+; V7A-NEXT:    sbc r12, r3, #0
+; V7A-NEXT:    rsb r3, r2, #32
+; V7A-NEXT:    orr r0, r0, r1, lsl r3
+; V7A-NEXT:    subs r3, r2, #32
+; V7A-NEXT:    lsrpl r0, r1, r3
+; V7A-NEXT:    lsr r1, r1, r2
+; V7A-NEXT:    movwpl r1, #0
+; V7A-NEXT:    and r0, r4, r0
+; V7A-NEXT:    and r1, r12, r1
+; V7A-NEXT:    pop {r4, pc}
+;
+; V7A-T-LABEL: bextr64_a0:
+; V7A-T:       @ %bb.0:
+; V7A-T-NEXT:    .save {r4, lr}
+; V7A-T-NEXT:    push {r4, lr}
+; V7A-T-NEXT:    ldr.w r12, [sp, #8]
+; V7A-T-NEXT:    mov.w lr, #1
+; V7A-T-NEXT:    lsrs r0, r2
+; V7A-T-NEXT:    rsb.w r4, r12, #32
+; V7A-T-NEXT:    subs.w r3, r12, #32
+; V7A-T-NEXT:    lsr.w r4, lr, r4
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lslpl.w r4, lr, r3
+; V7A-T-NEXT:    lsl.w r3, lr, r12
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r3, #0
+; V7A-T-NEXT:    subs r3, #1
+; V7A-T-NEXT:    sbc r12, r4, #0
+; V7A-T-NEXT:    rsb.w r4, r2, #32
+; V7A-T-NEXT:    lsl.w r4, r1, r4
+; V7A-T-NEXT:    orrs r0, r4
+; V7A-T-NEXT:    subs.w r4, r2, #32
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    lsrpl.w r0, r1, r4
+; V7A-T-NEXT:    lsr.w r1, r1, r2
+; V7A-T-NEXT:    and.w r0, r0, r3
+; V7A-T-NEXT:    it pl
+; V7A-T-NEXT:    movpl r1, #0
+; V7A-T-NEXT:    and.w r1, r1, r12
+; V7A-T-NEXT:    pop {r4, pc}
+;
+; V6M-LABEL: bextr64_a0:
+; V6M:       @ %bb.0:
+; V6M-NEXT:    .save {r4, r5, r6, r7, lr}
+; V6M-NEXT:    push {r4, r5, r6, r7, lr}
+; V6M-NEXT:    .pad #12
+; V6M-NEXT:    sub sp, #12
+; V6M-NEXT:    str r2, [sp, #8] @ 4-byte Spill
+; V6M-NEXT:    str r1, [sp, #4] @ 4-byte Spill
+; V6M-NEXT:    mov r6, r0
+; V6M-NEXT:    movs r0, #1
+; V6M-NEXT:    movs r7, #0
+; V6M-NEXT:    ldr r2, [sp, #32]
+; V6M-NEXT:    mov r1, r7
+; V6M-NEXT:    bl __aeabi_llsl
+; V6M-NEXT:    mov r4, r1
+; V6M-NEXT:    subs r5, r0, #1
+; V6M-NEXT:    sbcs r4, r7
+; V6M-NEXT:    mov r0, r6
+; V6M-NEXT:    ldr r1, [sp, #4] @ 4-byte Reload
+; V6M-NEXT:    ldr r2, [sp, #8] @ 4-byte Reload
+; V6M-NEXT:    bl __aeabi_llsr
+; V6M-NEXT:    ands r0, r5
+; V6M-NEXT:    ands r1, r4
+; V6M-NEXT:    add sp, #12
+; V6M-NEXT:    pop {r4, r5, r6, r7, pc}
+  %shifted = lshr i64 %val, %numskipbits
+  %onebit = shl i64 1, %numlowbits
+  %mask = add nsw i64 %onebit, -1
+  %masked = and i64 %mask, %shifted
+  ret i64 %masked
+}
+
+define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
+; CHECK-LABEL: bextr64_a0_arithmetic:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    ldr.w r12, [sp, #8]
+; CHECK-NEXT:    mov.w lr, #1
+; CHECK-NEXT:    lsrs r0, r2
+; CHECK-NEXT:    rsb.w r4, r12, #32
+; CHECK-NEXT:    subs.w r3, r12, #32
+; CHECK-NEXT:    lsr.w r4, lr, r4
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    lslpl.w r4, lr, r3
+; CHECK-NEXT:    lsl.w r3, lr, r12
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    movpl r3, #0
+; CHECK-NEXT:    subs r3, #1
+; CHECK-NEXT:    sbc r12, r4, #0
+; CHECK-NEXT:    rsb.w r4, r2, #32
+; CHECK-NEXT:    lsl.w r4, r1, r4
+; CHECK-NEXT:    orrs r0, r4
+; CHECK-NEXT:    subs.w r4, r2, #32
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    asrpl.w r0, r1, r4
+; CHECK-NEXT:    asr.w r2, r1, r2
+; CHECK-NEXT:    and.w r0, r0, r3
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    asrpl r2, r1, #31
+; CHECK-NEXT:    and.w r1, r12, r2
+; CHECK-NEXT:    pop {r4, pc}
+;
+; V7M-LABEL: bextr64_a0_arithmetic:
+; V7M:       @ %bb.0:
+; V7M-NEXT:    .save {r4, lr}
+; V7M-NEXT:    push {r4, lr}
+; V7M-NEXT:    ldr.w r12, [sp, #8]
+; V7M-NEXT:    mov.w lr, #1
+; V7M-NEXT:    lsrs r0, r2
+; V7M-NEXT:    rsb.w r4, r12, #32
+; V7M-NEXT:    subs.w r3, r12, #32
+; V7M-NEXT:    lsr.w r4, lr, r4
+; V7M-NEXT:    it pl
+; V7M-NEXT:    lslpl.w r4, lr, r3
+; V7M-NEXT:    lsl.w r3, lr, r12
+; V7M-NEXT:    it pl
+; V7M-NEXT:    movpl r3, #0
+; V7M-NEXT:    subs r3, #1
+; V7M-NEXT:    sbc r12, r4, #0
+; V7M-NEXT:    rsb.w r4, r2, #32
+; V7M-NEXT:    lsl.w r4, r1, r4
+; V7M-NEXT:    orrs r0, r4
+; V7M-NEXT:    subs.w r4, r2, #32
+; V7M-NEXT:    it pl
+; V7M-NEXT:    asrpl.w r0, r1, r4
+; V7M-NEXT:    asr.w r2, r1, r2
+; V7M-NEXT:    and.w r0, r0, r3
+; V7M-NEXT:    it pl
+; V7M-NEXT:    asrpl r2, r1, #31
+; V7M-NEXT:    and.w r1, r12, r2
+; V7M-NEXT:    pop {r4, pc}
+;
+; V7A-LABEL: bextr64_a0_arithmetic:
+; V7A:       @ %bb.0:
+; V7A-NEXT:    ....
[truncated]

AZero13 (Contributor, Author) commented Sep 11, 2025

@davemgreen

AZero13 force-pushed the Arm-shifty branch 2 times, most recently from 0393d6d to 989848d, on September 29, 2025 23:15
AZero13 requested a review from davemgreen on September 29, 2025 23:16
AZero13 (Contributor, Author) commented Sep 30, 2025

@davemgreen thoughts?

davemgreen (Collaborator) left a comment

Thanks, LGTM

… to the biggest legal type

For ARM, we want to do this for scalars up to the biggest legal type.
AZero13 (Contributor, Author) commented Oct 2, 2025

Thank you. Can you please merge? I just rebased to check everything is okay and removed the comments.

AZero13 (Contributor, Author) commented Oct 3, 2025

@davemgreen

davemgreen merged commit 90582ad into llvm:main on Oct 3, 2025
9 checks passed
MixedMatched pushed a commit to MixedMatched/llvm-project that referenced this pull request Oct 3, 2025
… to the biggest legal type (llvm#158070)

For ARM, we want to do this up to 32 bits; otherwise the code ends up bigger and bloated.
AZero13 deleted the Arm-shifty branch on October 3, 2025 14:11