
[AArch64] Use mov as opposed to And 0xffffffff #98655

Merged

davemgreen merged 1 commit into llvm:main from gh-a64-usemovnotand on Jul 14, 2024

Conversation

davemgreen (Collaborator)

This adds a tablegen pattern to select ORRWrr (a 32-bit mov) in place of an i64 AND with 0xffffffff, as the mov implicitly clears the upper bits of the destination register. The operation can be seen as a zext(trunc(..)), and the code becomes simpler still if the mov is later eliminated.

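To illustrate the transformation, here is a minimal example mirroring the and0xffffffff test added to and-mask-removal.ll in this patch (the before/after assembly matches the updated CHECK lines):

    ; An i64 AND with 0xffffffff keeps only the low 32 bits,
    ; i.e. it is a zext(trunc(..)) of the input.
    define i64 @and0xffffffff(i64 %a) {
    entry:
      %b = and i64 %a, u0xffffffff
      ret i64 %b
    }

    ; Before this patch:           After this patch:
    ;   and x0, x0, #0xffffffff      mov w0, w0
    ;   ret                          ret

Since any write to a 32-bit W register zeroes bits [63:32] of the corresponding X register, the 32-bit mov is sufficient to implement the mask.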
llvmbot (Collaborator) commented Jul 12, 2024

@llvm/pr-subscribers-backend-aarch64

Author: David Green (davemgreen)

Changes

This adds a tablegen pattern to select ORRWrr (a 32-bit mov) in place of an i64 AND with 0xffffffff, as the mov implicitly clears the upper bits of the destination register. The operation can be seen as a zext(trunc(..)), and the code becomes simpler still if the mov is later eliminated.


Full diff: https://github.com/llvm/llvm-project/pull/98655.diff

10 Files Affected:

  • (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (+5)
  • (modified) llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll (+8-8)
  • (modified) llvm/test/CodeGen/AArch64/and-mask-removal.ll (+30)
  • (modified) llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll (+1-1)
  • (modified) llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll (+6-6)
  • (modified) llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll (+3-3)
  • (modified) llvm/test/CodeGen/AArch64/arm64_32.ll (+5-4)
  • (modified) llvm/test/CodeGen/AArch64/bitfield.ll (+1-1)
  • (modified) llvm/test/CodeGen/AArch64/pr58431.ll (+1-1)
  • (modified) llvm/test/CodeGen/AArch64/swifterror.ll (+1-1)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 152a6c2e95b27..dd11f74882115 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2700,6 +2700,11 @@ def : InstAlias<"tst $src1, $src2$sh",
 def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
 def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
 
+// Emit (and 0xFFFFFFFF) as a ORRWrr move which may be eliminated.
+let AddedComplexity = 6 in
+def : Pat<(i64 (and GPR64:$Rn, 0xffffffff)),
+          (SUBREG_TO_REG (i64 0), (ORRWrr WZR, (EXTRACT_SUBREG GPR64:$Rn, sub_32)), sub_32)>;
+
 
 //===----------------------------------------------------------------------===//
 // One operand data processing instructions.
diff --git a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
index e4b534bfe0e37..f49d469e50cdd 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
@@ -997,7 +997,7 @@ define i64 @umull_ldr2_d(ptr %x0, i64 %x1) {
 ; CHECK-LABEL: umull_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
-; CHECK-NEXT:    and x9, x1, #0xffffffff
+; CHECK-NEXT:    mov w9, w1
 ; CHECK-NEXT:    umull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
@@ -1110,7 +1110,7 @@ define i64 @umaddl_ldr2_d(ptr %x0, i64 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
-; CHECK-NEXT:    and x9, x1, #0xffffffff
+; CHECK-NEXT:    mov w9, w1
 ; CHECK-NEXT:    umaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
@@ -1224,7 +1224,7 @@ define i64 @umnegl_ldr2_d(ptr %x0, i64 %x1) {
 ; CHECK-LABEL: umnegl_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
-; CHECK-NEXT:    and x9, x1, #0xffffffff
+; CHECK-NEXT:    mov w9, w1
 ; CHECK-NEXT:    umnegl x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
@@ -1338,7 +1338,7 @@ define i64 @umsubl_ldr2_d(ptr %x0, i64 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
-; CHECK-NEXT:    and x9, x1, #0xffffffff
+; CHECK-NEXT:    mov w9, w1
 ; CHECK-NEXT:    umsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
@@ -1400,7 +1400,7 @@ define i64 @umull_and_lshr(i64 %x) {
 ; CHECK-LABEL: umull_and_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #32
-; CHECK-NEXT:    and x9, x0, #0xffffffff
+; CHECK-NEXT:    mov w9, w0
 ; CHECK-NEXT:    umull x0, w9, w8
 ; CHECK-NEXT:    ret
     %lo = and i64 %x, u0xffffffff
@@ -1424,7 +1424,7 @@ define i64 @umaddl_and_lshr(i64 %x, i64 %a) {
 ; CHECK-LABEL: umaddl_and_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #32
-; CHECK-NEXT:    and x9, x0, #0xffffffff
+; CHECK-NEXT:    mov w9, w0
 ; CHECK-NEXT:    umaddl x0, w9, w8, x1
 ; CHECK-NEXT:    ret
     %lo = and i64 %x, u0xffffffff
@@ -1437,8 +1437,8 @@ define i64 @umaddl_and_lshr(i64 %x, i64 %a) {
 define i64 @umaddl_and_and(i64 %x, i64 %y, i64 %a) {
 ; CHECK-LABEL: umaddl_and_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and x8, x0, #0xffffffff
-; CHECK-NEXT:    and x9, x1, #0xffffffff
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    mov w9, w1
 ; CHECK-NEXT:    umaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
     %lo = and i64 %x, u0xffffffff
diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
index 493d503de2cc1..f005ca47ad124 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
@@ -549,3 +549,33 @@ define i64 @test_2_selects(i8 zeroext %a) {
 }
 
 declare i8 @llvm.usub.sat.i8(i8, i8) #0
+
+define i64 @and0xffffffff(i64 %a) nounwind ssp {
+; CHECK-LABEL: and0xffffffff:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    mov w0, w0
+; CHECK-NEXT:    ret
+entry:
+  %b = and i64 %a, u0xffffffff
+  ret i64 %b
+}
+
+define i64 @and0xfffffff0(i64 %a) nounwind ssp {
+; CHECK-LABEL: and0xfffffff0:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    and x0, x0, #0xfffffff0
+; CHECK-NEXT:    ret
+entry:
+  %b = and i64 %a, u0xfffffff0
+  ret i64 %b
+}
+
+define i64 @and0x7fffffff(i64 %a) nounwind ssp {
+; CHECK-LABEL: and0x7fffffff:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    and x0, x0, #0x7fffffff
+; CHECK-NEXT:    ret
+entry:
+  %b = and i64 %a, u0x7fffffff
+  ret i64 %b
+}
diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
index 761462be6b4b0..e9a550d07eb58 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
@@ -8,7 +8,7 @@ entry:
   store i64 %ext, ptr %addr, align 8
 ; CHECK:             adrp    x{{[0-9]+}}, _x@GOTPAGE
 ; CHECK:        ldr     x{{[0-9]+}}, [x{{[0-9]+}}, _x@GOTPAGEOFF]
-; CHECK-NEXT:        and     x{{[0-9]+}}, x{{[0-9]+}}, #0xffffffff
+; CHECK-NEXT:        mov     w{{[0-9]+}}, w{{[0-9]+}}
 ; CHECK-NEXT:        str     x{{[0-9]+}}, [x{{[0-9]+}}]
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll b/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
index ed71f0958604f..64c5cfdfec75a 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
@@ -2,9 +2,9 @@
 
 define i64 @test_memcpy(ptr %addr, ptr %src, i1 %tst) minsize {
 ; CHECK-LABEL: test_memcpy:
-; CHECK: ldr [[VAL64:x[0-9]+]], [x0]
+; CHECK: ldr x[[VAL64:[0-9]+]], [x0]
 ; [...]
-; CHECK: and x0, [[VAL64]], #0xffffffff
+; CHECK: mov w0, w[[VAL64]]
 ; CHECK: bl _memcpy
 
   %val64 = load i64, ptr %addr
@@ -22,9 +22,9 @@ false:
 
 define i64 @test_memmove(ptr %addr, ptr %src, i1 %tst) minsize {
 ; CHECK-LABEL: test_memmove:
-; CHECK: ldr [[VAL64:x[0-9]+]], [x0]
+; CHECK: ldr x[[VAL64:[0-9]+]], [x0]
 ; [...]
-; CHECK: and x0, [[VAL64]], #0xffffffff
+; CHECK: mov w0, w[[VAL64]]
 ; CHECK: bl _memmove
 
   %val64 = load i64, ptr %addr
@@ -42,9 +42,9 @@ false:
 
 define i64 @test_memset(ptr %addr, ptr %src, i1 %tst) minsize {
 ; CHECK-LABEL: test_memset:
-; CHECK: ldr [[VAL64:x[0-9]+]], [x0]
+; CHECK: ldr x[[VAL64:[0-9]+]], [x0]
 ; [...]
-; CHECK: and x0, [[VAL64]], #0xffffffff
+; CHECK: mov w0, w[[VAL64]]
 ; CHECK: bl _memset
 
   %val64 = load i64, ptr %addr
diff --git a/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll b/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
index 2d6f0fbe30888..7b004b2f6d310 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
@@ -2,7 +2,7 @@
 
 define void @pass_pointer(i64 %in) {
 ; CHECK-LABEL: pass_pointer:
-; CHECK: and x0, x0, #0xffffffff
+; CHECK: mov w0, w0
 ; CHECK: bl _take_pointer
 
   %in32 = trunc i64 %in to i32
@@ -39,8 +39,8 @@ define void @caller_ptr_stack_slot(ptr %ptr) {
 
 define ptr @return_ptr(i64 %in, i64 %r) {
 ; CHECK-LABEL: return_ptr:
-; CHECK: sdiv [[VAL64:x[0-9]+]], x0, x1
-; CHECK: and x0, [[VAL64]], #0xffffffff
+; CHECK: sdiv x[[VAL64:[0-9]+]], x0, x1
+; CHECK: mov w0, w[[VAL64]]
 
   %sum = sdiv i64 %in, %r
   %sum32 = trunc i64 %sum to i32
diff --git a/llvm/test/CodeGen/AArch64/arm64_32.ll b/llvm/test/CodeGen/AArch64/arm64_32.ll
index 716fdd6eac15c..c63edf0ceeea3 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32.ll
@@ -598,7 +598,7 @@ define void @test_asm_memory(ptr %base.addr) {
 
 define void @test_unsafe_asm_memory(i64 %val) {
 ; CHECK-LABEL: test_unsafe_asm_memory:
-; CHECK: and x[[ADDR:[0-9]+]], x0, #0xffffffff
+; CHECK: mov w[[ADDR:[0-9]+]], w0
 ; CHECK: str wzr, [x[[ADDR]]]
   %addr_int = trunc i64 %val to i32
   %addr = inttoptr i32 %addr_int to ptr
@@ -615,7 +615,8 @@ define [9 x ptr] @test_demoted_return(ptr %in) {
 
 define ptr @test_inttoptr(i64 %in) {
 ; CHECK-LABEL: test_inttoptr:
-; CHECK: and x0, x0, #0xffffffff
+; CHECK-OPT: mov w0, w0
+; CHECK-FAST: and x0, x0, #0xffffffff
   %res = inttoptr i64 %in to ptr
   ret ptr %res
 }
@@ -732,7 +733,7 @@ define ptr @test_gep_nonpow2(ptr %a0, i32 %a1) {
 define void @test_memset(i64 %in, i8 %value)  {
 ; CHECK-LABEL: test_memset:
 ; CHECK-DAG: lsr x2, x0, #32
-; CHECK-DAG: and x0, x0, #0xffffffff
+; CHECK-DAG: mov w0, w0
 ; CHECK: b _memset
 
   %ptr.i32 = trunc i64 %in to i32
@@ -746,7 +747,7 @@ define void @test_memset(i64 %in, i8 %value)  {
 define void @test_bzero(i64 %in)  {
 ; CHECK-LABEL: test_bzero:
 ; CHECK-DAG: lsr x1, x0, #32
-; CHECK-DAG: and x0, x0, #0xffffffff
+; CHECK-DAG: mov w0, w0
 ; CHECK: b _bzero
 
   %ptr.i32 = trunc i64 %in to i32
diff --git a/llvm/test/CodeGen/AArch64/bitfield.ll b/llvm/test/CodeGen/AArch64/bitfield.ll
index 1dfa4a8e12001..6e18924ea19ee 100644
--- a/llvm/test/CodeGen/AArch64/bitfield.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield.ll
@@ -173,7 +173,7 @@ define dso_local void @test_zext_inreg_64(i64 %in) {
   %trunc_i32 = trunc i64 %in to i32
   %zext_i32 = zext i32 %trunc_i32 to i64
   store volatile i64 %zext_i32, ptr @var64
-; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffffffff
+; CHECK: mov {{w[0-9]+}}, {{w[0-9]+}}
 
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/pr58431.ll b/llvm/test/CodeGen/AArch64/pr58431.ll
index e87d8f7874d62..88bab4af95d64 100644
--- a/llvm/test/CodeGen/AArch64/pr58431.ll
+++ b/llvm/test/CodeGen/AArch64/pr58431.ll
@@ -5,7 +5,7 @@ define i32 @f(i64 %0) {
 ; CHECK-LABEL: f:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #10 // =0xa
-; CHECK-NEXT:    and x9, x0, #0xffffffff
+; CHECK-NEXT:    mov w9, w0
 ; CHECK-NEXT:    udiv x10, x9, x8
 ; CHECK-NEXT:    msub x0, x10, x8, x9
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
diff --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll
index cd06f8dbfad84..07ee87e880aff 100644
--- a/llvm/test/CodeGen/AArch64/swifterror.ll
+++ b/llvm/test/CodeGen/AArch64/swifterror.ll
@@ -977,7 +977,7 @@ define float @foo_vararg(ptr swifterror %error_ptr_ref, ...) {
 ; CHECK-APPLE-ARM64_32-NEXT:    add x9, x29, #16
 ; CHECK-APPLE-ARM64_32-NEXT:    strb w8, [x0, #8]
 ; CHECK-APPLE-ARM64_32-NEXT:    orr w8, w9, #0x4
-; CHECK-APPLE-ARM64_32-NEXT:    and x10, x9, #0xfffffff0
+; CHECK-APPLE-ARM64_32-NEXT:    mov w10, w9
 ; CHECK-APPLE-ARM64_32-NEXT:    stur w8, [x29, #-8]
 ; CHECK-APPLE-ARM64_32-NEXT:    ldr w11, [x10]
 ; CHECK-APPLE-ARM64_32-NEXT:    orr w10, w9, #0x8
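
As a rough sketch of what the new pattern expands to during instruction selection (the register names are illustrative and the exact MIR may differ):

    ; (i64 (and GPR64:$Rn, 0xffffffff)) is selected as:
    %lo:gpr32  = EXTRACT_SUBREG %Rn, %subreg.sub_32      ; low 32 bits of $Rn
    %mov:gpr32 = ORRWrr $wzr, %lo                        ; "mov wN, wM", zeroing bits [63:32]
    %res:gpr64 = SUBREG_TO_REG 0, %mov, %subreg.sub_32   ; re-wrap as i64, upper bits known zero

The AddedComplexity = 6 on the pattern gives it priority over the generic logical-immediate AND selection, so the mov form wins for this specific mask.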

davemgreen (Collaborator, Author)

Thanks

davemgreen merged commit fe7fc54 into llvm:main on Jul 14, 2024
9 checks passed
davemgreen deleted the gh-a64-usemovnotand branch on July 14, 2024 at 12:33
aaryanshukla pushed a commit to aaryanshukla/llvm-project that referenced this pull request Jul 14, 2024
This adds a tablegen pattern to use ORRWrr (mov) as opposed to i64 AND
0xffffffff, as the mov will implicitly clear the upper bits. This can be
seen as a zext(trunc(..)), and could be simpler if it is eliminated.