Conversation

@rj-jesus
Contributor

Allow using SVE DUPM instructions to materialise fixed-length vectors.

Fixes #122422.

Allow using SVE DUPM instructions to materialise fixed-length vectors.
@llvmbot
Member

llvmbot commented Dec 22, 2025

@llvm/pr-subscribers-backend-aarch64

Author: Ricardo Jesus (rj-jesus)

Changes

Allow using SVE DUPM instructions to materialise fixed-length vectors.

Fixes #122422.
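
For intuition: a fixed-length splat can be materialised with DUPM when the splat value, replicated across 64 bits at its element size, forms a valid AArch64 bitmask ("logical") immediate. Below is a minimal standalone sketch of that check. It mirrors the shape of the new isSVELogicalImm helper but substitutes a simplified bitmask-immediate test for LLVM's processLogicalImmediate, so it is an illustration rather than the in-tree implementation.

#include <cstdint>
#include <cstdio>

// Replicate a splat value of the given element size across 64 bits, as the
// new isSVELogicalImm helper does before testing encodability.
static uint64_t replicateTo64(unsigned SizeInBits, uint64_t Val) {
  Val &= (SizeInBits == 64) ? ~0ULL : ((1ULL << SizeInBits) - 1);
  for (unsigned Sz = SizeInBits; Sz < 64; Sz *= 2)
    Val |= Val << Sz;
  return Val;
}

// Simplified stand-in for processLogicalImmediate's validity test: a 64-bit
// value is a bitmask immediate iff it is a repetition of a 2/4/8/16/32/64-bit
// element that is some rotation of a contiguous run of ones, excluding the
// all-zeros and all-ones patterns.
static bool isBitmaskImm64(uint64_t V) {
  if (V == 0 || V == ~0ULL)
    return false;
  for (unsigned E = 2; E <= 64; E *= 2) {
    uint64_t Mask = (E == 64) ? ~0ULL : ((1ULL << E) - 1);
    uint64_t Elt = V & Mask, Rep = Elt;
    for (unsigned S = E; S < 64; S *= 2)
      Rep |= Rep << S;
    if (Rep != V)
      continue; // V is not E-periodic.
    for (unsigned R = 0; R < E; ++R) {
      uint64_t Rot = R ? (((Elt >> R) | (Elt << (E - R))) & Mask) : Elt;
      if (Rot != Mask && (Rot & (Rot + 1)) == 0)
        return true; // Some rotation is a low run of ones.
    }
  }
  return false;
}

int main() {
  // Splat values taken from the tests in this patch; each now lowers to a
  // single SVE mov (DUPM) instead of a constant-pool load, mov+dup, or
  // movi+fneg sequence.
  struct { unsigned Bits; uint64_t Val; } Tests[] = {
      {16, 510}, {32, 4092}, {64, 128}, {64, 0x7fffffffffffffffULL}};
  for (const auto &T : Tests) {
    uint64_t Rep = replicateTo64(T.Bits, T.Val);
    printf("i%u splat %llu -> 0x%016llx: %s\n", T.Bits,
           (unsigned long long)T.Val, (unsigned long long)Rep,
           isBitmaskImm64(Rep) ? "DUPM-encodable" : "not encodable");
  }
  return 0;
}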


Full diff: https://github.com/llvm/llvm-project/pull/173273.diff

6 Files Affected:

  • (modified) llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (+1-24)
  • (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+3-1)
  • (modified) llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h (+28)
  • (modified) llvm/test/CodeGen/AArch64/movi64_sve.ll (+86-33)
  • (modified) llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll (+2-4)
  • (modified) llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll (+2-4)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index a658fb78852ec..c9aed1a8d816a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4432,31 +4432,8 @@ bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
   if (Invert)
     ImmVal = ~ImmVal;
 
-  // Shift mask depending on type size.
-  switch (VT.SimpleTy) {
-  case MVT::i8:
-    ImmVal &= 0xFF;
-    ImmVal |= ImmVal << 8;
-    ImmVal |= ImmVal << 16;
-    ImmVal |= ImmVal << 32;
-    break;
-  case MVT::i16:
-    ImmVal &= 0xFFFF;
-    ImmVal |= ImmVal << 16;
-    ImmVal |= ImmVal << 32;
-    break;
-  case MVT::i32:
-    ImmVal &= 0xFFFFFFFF;
-    ImmVal |= ImmVal << 32;
-    break;
-  case MVT::i64:
-    break;
-  default:
-    llvm_unreachable("Unexpected type");
-  }
-
   uint64_t encoding;
-  if (!AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding))
+  if (!AArch64_AM::isSVELogicalImm(VT.getScalarSizeInBits(), ImmVal, encoding))
     return false;
 
   Imm = CurDAG->getTargetConstant(encoding, SDLoc(N), MVT::i64);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index eff24f3a9fb29..58193b36fe1fb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15635,7 +15635,9 @@ static SDValue trySVESplat64(SDValue Op, SelectionDAG &DAG,
   // See if we can make use of the SVE dup instruction.
   APInt Val64 = DefBits.trunc(64);
   int32_t ImmVal, ShiftVal;
-  if (!AArch64_AM::isSVECpyDupImm(64, Val64.getSExtValue(), ImmVal, ShiftVal))
+  uint64_t Encoding;
+  if (!AArch64_AM::isSVECpyDupImm(64, Val64.getSExtValue(), ImmVal, ShiftVal) &&
+      !AArch64_AM::isSVELogicalImm(64, Val64.getZExtValue(), Encoding))
     return SDValue();
 
   SDLoc DL(Op);
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 4ae5d040d5e8a..35f729a5ef96c 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -901,6 +901,34 @@ static inline bool isSVECpyDupImm(int SizeInBits, int64_t Val, int32_t &Imm,
   return false;
 }
 
+static inline bool isSVELogicalImm(unsigned SizeInBits, uint64_t ImmVal,
+                                   uint64_t &Encoding) {
+  // Shift mask depending on type size.
+  switch (SizeInBits) {
+  case 8:
+    ImmVal &= 0xFF;
+    ImmVal |= ImmVal << 8;
+    ImmVal |= ImmVal << 16;
+    ImmVal |= ImmVal << 32;
+    break;
+  case 16:
+    ImmVal &= 0xFFFF;
+    ImmVal |= ImmVal << 16;
+    ImmVal |= ImmVal << 32;
+    break;
+  case 32:
+    ImmVal &= 0xFFFFFFFF;
+    ImmVal |= ImmVal << 32;
+    break;
+  case 64:
+    break;
+  default:
+    llvm_unreachable("Unexpected size");
+  }
+
+  return processLogicalImmediate(ImmVal, 64, Encoding);
+}
+
 } // end namespace AArch64_AM
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/AArch64/movi64_sve.ll b/llvm/test/CodeGen/AArch64/movi64_sve.ll
index 1d4e00d0c3d10..44a3865990bbf 100644
--- a/llvm/test/CodeGen/AArch64/movi64_sve.ll
+++ b/llvm/test/CodeGen/AArch64/movi64_sve.ll
@@ -116,11 +116,25 @@ define <4 x i32> @movi_v4i32_2() {
   ret <4 x i32> <i32 32512, i32 0, i32 32512, i32 0>
 }
 
+define <4 x i32> @movi_v4i32_4092() {
+; NEON-LABEL: movi_v4i32_4092:
+; NEON:       // %bb.0:
+; NEON-NEXT:    mov w8, #4092 // =0xffc
+; NEON-NEXT:    dup v0.4s, w8
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_v4i32_4092:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.s, #4092 // =0xffc
+; SVE-NEXT:    ret
+  ret <4 x i32> splat (i32 4092)
+}
+
 define <8 x i16> @movi_v8i16_1() {
 ; NEON-LABEL: movi_v8i16_1:
 ; NEON:       // %bb.0:
-; NEON-NEXT:    adrp x8, .LCPI8_0
-; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI8_0]
+; NEON-NEXT:    adrp x8, .LCPI9_0
+; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI9_0]
 ; NEON-NEXT:    ret
 ;
 ; SVE-LABEL: movi_v8i16_1:
@@ -133,8 +147,8 @@ define <8 x i16> @movi_v8i16_1() {
 define <8 x i16> @movi_v8i16_2() {
 ; NEON-LABEL: movi_v8i16_2:
 ; NEON:       // %bb.0:
-; NEON-NEXT:    adrp x8, .LCPI9_0
-; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI9_0]
+; NEON-NEXT:    adrp x8, .LCPI10_0
+; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
 ; NEON-NEXT:    ret
 ;
 ; SVE-LABEL: movi_v8i16_2:
@@ -144,11 +158,25 @@ define <8 x i16> @movi_v8i16_2() {
   ret <8 x i16> <i16 32512, i16 0, i16 0, i16 0, i16 32512, i16 0, i16 0, i16 0>
 }
 
+define <8 x i16> @movi_v8i16_510() {
+; NEON-LABEL: movi_v8i16_510:
+; NEON:       // %bb.0:
+; NEON-NEXT:    mov w8, #510 // =0x1fe
+; NEON-NEXT:    dup v0.8h, w8
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_v8i16_510:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.h, #510 // =0x1fe
+; SVE-NEXT:    ret
+  ret <8 x i16> splat (i16 510)
+}
+
 define <16 x i8> @movi_v16i8_1() {
 ; NEON-LABEL: movi_v16i8_1:
 ; NEON:       // %bb.0:
-; NEON-NEXT:    adrp x8, .LCPI10_0
-; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
+; NEON-NEXT:    adrp x8, .LCPI12_0
+; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI12_0]
 ; NEON-NEXT:    ret
 ;
 ; SVE-LABEL: movi_v16i8_1:
@@ -161,8 +189,8 @@ define <16 x i8> @movi_v16i8_1() {
 define <16 x i8> @movi_v16i8_2() {
 ; NEON-LABEL: movi_v16i8_2:
 ; NEON:       // %bb.0:
-; NEON-NEXT:    adrp x8, .LCPI11_0
-; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI11_0]
+; NEON-NEXT:    adrp x8, .LCPI13_0
+; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI13_0]
 ; NEON-NEXT:    ret
 ;
 ; SVE-LABEL: movi_v16i8_2:
@@ -175,20 +203,30 @@ define <16 x i8> @movi_v16i8_2() {
 ; Negative cases
 
 define <2 x i64> @movi_128_v2i64() {
-; COMMON-LABEL: movi_128_v2i64:
-; COMMON:       // %bb.0:
-; COMMON-NEXT:    mov w8, #128 // =0x80
-; COMMON-NEXT:    dup v0.2d, x8
-; COMMON-NEXT:    ret
+; NEON-LABEL: movi_128_v2i64:
+; NEON:       // %bb.0:
+; NEON-NEXT:    mov w8, #128 // =0x80
+; NEON-NEXT:    dup v0.2d, x8
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_128_v2i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.d, #128 // =0x80
+; SVE-NEXT:    ret
   ret <2 x i64> splat (i64 128)
 }
 
 define <2 x i64> @movi_m127_v2i64() {
-; COMMON-LABEL: movi_m127_v2i64:
-; COMMON:       // %bb.0:
-; COMMON-NEXT:    mov x8, #-129 // =0xffffffffffffff7f
-; COMMON-NEXT:    dup v0.2d, x8
-; COMMON-NEXT:    ret
+; NEON-LABEL: movi_m127_v2i64:
+; NEON:       // %bb.0:
+; NEON-NEXT:    mov x8, #-129 // =0xffffffffffffff7f
+; NEON-NEXT:    dup v0.2d, x8
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_m127_v2i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.d, #-129 // =0xffffffffffffff7f
+; SVE-NEXT:    ret
   ret <2 x i64> splat (i64 -129)
 }
 
@@ -202,11 +240,16 @@ define <2 x i64> @movi_32513_v2i64() {
 }
 
 define <2 x i64> @movi_m32769_v2i64() {
-; COMMON-LABEL: movi_m32769_v2i64:
-; COMMON:       // %bb.0:
-; COMMON-NEXT:    mov x8, #-32769 // =0xffffffffffff7fff
-; COMMON-NEXT:    dup v0.2d, x8
-; COMMON-NEXT:    ret
+; NEON-LABEL: movi_m32769_v2i64:
+; NEON:       // %bb.0:
+; NEON-NEXT:    mov x8, #-32769 // =0xffffffffffff7fff
+; NEON-NEXT:    dup v0.2d, x8
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_m32769_v2i64:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.d, #0xffffffffffff7fff
+; SVE-NEXT:    ret
   ret <2 x i64> splat (i64 -32769)
 }
 
@@ -220,19 +263,29 @@ define <2 x i64> @movi_257_v2i64() {
 }
 
 define <4 x i32> @movi_v4i32_3() {
-; COMMON-LABEL: movi_v4i32_3:
-; COMMON:       // %bb.0:
-; COMMON-NEXT:    adrp x8, .LCPI17_0
-; COMMON-NEXT:    ldr q0, [x8, :lo12:.LCPI17_0]
-; COMMON-NEXT:    ret
+; NEON-LABEL: movi_v4i32_3:
+; NEON:       // %bb.0:
+; NEON-NEXT:    adrp x8, .LCPI19_0
+; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI19_0]
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_v4i32_3:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.d, #0xffffff80
+; SVE-NEXT:    ret
   ret <4 x i32> <i32 -128, i32 0, i32 -128, i32 0>
 }
 
 define <16 x i8> @movi_v16i8_3() {
-; COMMON-LABEL: movi_v16i8_3:
-; COMMON:       // %bb.0:
-; COMMON-NEXT:    adrp x8, .LCPI18_0
-; COMMON-NEXT:    ldr q0, [x8, :lo12:.LCPI18_0]
-; COMMON-NEXT:    ret
+; NEON-LABEL: movi_v16i8_3:
+; NEON:       // %bb.0:
+; NEON-NEXT:    adrp x8, .LCPI20_0
+; NEON-NEXT:    ldr q0, [x8, :lo12:.LCPI20_0]
+; NEON-NEXT:    ret
+;
+; SVE-LABEL: movi_v16i8_3:
+; SVE:       // %bb.0:
+; SVE-NEXT:    mov z0.d, #0x7f0000
+; SVE-NEXT:    ret
   ret <16 x i8> <i8 0, i8 0, i8 127, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 127, i8 0, i8 0, i8 0, i8 0, i8 0>
 }
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
index 37450431d8a11..e93f0d601746d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
@@ -260,10 +260,9 @@ define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0
 define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    mov z0.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    ldr q2, [x1]
-; CHECK-NEXT:    fneg v0.2d, v0.2d
 ; CHECK-NEXT:    bsl v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
@@ -413,11 +412,10 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
 ; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    mov z0.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    fcvtl v1.2d, v1.2s
-; CHECK-NEXT:    fneg v0.2d, v0.2d
 ; CHECK-NEXT:    bsl v0.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
index 98056fd1d0bf0..b8d07c69189e9 100644
--- a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
@@ -247,10 +247,9 @@ define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0
 define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    mov z0.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    ldr q2, [x1]
-; CHECK-NEXT:    fneg v0.2d, v0.2d
 ; CHECK-NEXT:    bsl v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
@@ -393,11 +392,10 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
 ; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    mov z0.d, #0x7fffffffffffffff
 ; CHECK-NEXT:    ldr q2, [x0]
 ; CHECK-NEXT:    fcvtl v1.2d, v1.2s
-; CHECK-NEXT:    fneg v0.2d, v0.2d
 ; CHECK-NEXT:    bsl v0.16b, v2.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
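
Taken together, the lowering-side change gives a 64-bit splat two chances at a single SVE mov: the existing DUP/CPY path (a signed 8-bit immediate, optionally shifted left by 8) and the new DUPM path. The following restates the updated trySVESplat64 condition as a standalone predicate; the DUP/CPY ranges are paraphrased from isSVECpyDupImm and isBitmaskImm64 is the simplified test sketched earlier, so this is illustrative rather than the in-tree logic.

// Sketch: can a 64-bit splat now be materialised with one SVE mov?
static bool canSplat64WithSVEMov(int64_t Val) {
  // DUP/CPY: signed 8-bit immediate, optionally LSL #8, i.e. values in
  // [-128, 127] or multiples of 256 in [-32768, 32512].
  bool FitsCpyDup = (Val >= -128 && Val <= 127) ||
                    (Val % 256 == 0 && Val >= -32768 && Val <= 32512);
  // DUPM: the splat must be a valid bitmask ("logical") immediate.
  return FitsCpyDup || isBitmaskImm64(static_cast<uint64_t>(Val));
}

This matches the test updates above: 128, -129, and -32769 miss the DUP/CPY ranges but are rotated runs of ones (128 has one set bit; -129 and -32769 have 63), so they now take the DUPM path, while 32513 (0x7f01) satisfies neither form and still goes through mov+dup.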

@rj-jesus rj-jesus merged commit a76084f into llvm:main Dec 23, 2025
12 checks passed
@rj-jesus rj-jesus deleted the rjj/aarch64-improve-movi-sve branch December 23, 2025 11:03
valadaptive pushed a commit to valadaptive/llvm-project that referenced this pull request Dec 24, 2025
Allow using SVE DUPM instructions to materialise fixed-length vectors.

Fixes llvm#122422.
mahesh-attarde pushed a commit to mahesh-attarde/llvm-project that referenced this pull request Jan 6, 2026
Allow using SVE DUPM instructions to materialise fixed-length vectors.

Fixes llvm#122422.
Development

Successfully merging this pull request may close these issues.

[AArch64][SVE] can improve SIMD immediate generation
