-
Notifications
You must be signed in to change notification settings - Fork 15.7k
[AArch64] Improve SIMD immediate generation with SVE. #173273
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Conversation
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Allow using SVE DUPM instructions to materialise fixed-length vectors.
Member
|
@llvm/pr-subscribers-backend-aarch64 — Author: Ricardo Jesus (rj-jesus). Changes: Allow using SVE DUPM instructions to materialise fixed-length vectors. Fixes #122422. Full diff: https://github.com/llvm/llvm-project/pull/173273.diff — 6 files affected:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index a658fb78852ec..c9aed1a8d816a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4432,31 +4432,8 @@ bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
if (Invert)
ImmVal = ~ImmVal;
- // Shift mask depending on type size.
- switch (VT.SimpleTy) {
- case MVT::i8:
- ImmVal &= 0xFF;
- ImmVal |= ImmVal << 8;
- ImmVal |= ImmVal << 16;
- ImmVal |= ImmVal << 32;
- break;
- case MVT::i16:
- ImmVal &= 0xFFFF;
- ImmVal |= ImmVal << 16;
- ImmVal |= ImmVal << 32;
- break;
- case MVT::i32:
- ImmVal &= 0xFFFFFFFF;
- ImmVal |= ImmVal << 32;
- break;
- case MVT::i64:
- break;
- default:
- llvm_unreachable("Unexpected type");
- }
-
uint64_t encoding;
- if (!AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding))
+ if (!AArch64_AM::isSVELogicalImm(VT.getScalarSizeInBits(), ImmVal, encoding))
return false;
Imm = CurDAG->getTargetConstant(encoding, SDLoc(N), MVT::i64);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index eff24f3a9fb29..58193b36fe1fb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15635,7 +15635,9 @@ static SDValue trySVESplat64(SDValue Op, SelectionDAG &DAG,
// See if we can make use of the SVE dup instruction.
APInt Val64 = DefBits.trunc(64);
int32_t ImmVal, ShiftVal;
- if (!AArch64_AM::isSVECpyDupImm(64, Val64.getSExtValue(), ImmVal, ShiftVal))
+ uint64_t Encoding;
+ if (!AArch64_AM::isSVECpyDupImm(64, Val64.getSExtValue(), ImmVal, ShiftVal) &&
+ !AArch64_AM::isSVELogicalImm(64, Val64.getZExtValue(), Encoding))
return SDValue();
SDLoc DL(Op);
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 4ae5d040d5e8a..35f729a5ef96c 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -901,6 +901,34 @@ static inline bool isSVECpyDupImm(int SizeInBits, int64_t Val, int32_t &Imm,
return false;
}
+static inline bool isSVELogicalImm(unsigned SizeInBits, uint64_t ImmVal,
+ uint64_t &Encoding) {
+ // Shift mask depending on type size.
+ switch (SizeInBits) {
+ case 8:
+ ImmVal &= 0xFF;
+ ImmVal |= ImmVal << 8;
+ ImmVal |= ImmVal << 16;
+ ImmVal |= ImmVal << 32;
+ break;
+ case 16:
+ ImmVal &= 0xFFFF;
+ ImmVal |= ImmVal << 16;
+ ImmVal |= ImmVal << 32;
+ break;
+ case 32:
+ ImmVal &= 0xFFFFFFFF;
+ ImmVal |= ImmVal << 32;
+ break;
+ case 64:
+ break;
+ default:
+ llvm_unreachable("Unexpected size");
+ }
+
+ return processLogicalImmediate(ImmVal, 64, Encoding);
+}
+
} // end namespace AArch64_AM
} // end namespace llvm
diff --git a/llvm/test/CodeGen/AArch64/movi64_sve.ll b/llvm/test/CodeGen/AArch64/movi64_sve.ll
index 1d4e00d0c3d10..44a3865990bbf 100644
--- a/llvm/test/CodeGen/AArch64/movi64_sve.ll
+++ b/llvm/test/CodeGen/AArch64/movi64_sve.ll
@@ -116,11 +116,25 @@ define <4 x i32> @movi_v4i32_2() {
ret <4 x i32> <i32 32512, i32 0, i32 32512, i32 0>
}
+define <4 x i32> @movi_v4i32_4092() {
+; NEON-LABEL: movi_v4i32_4092:
+; NEON: // %bb.0:
+; NEON-NEXT: mov w8, #4092 // =0xffc
+; NEON-NEXT: dup v0.4s, w8
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_v4i32_4092:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.s, #4092 // =0xffc
+; SVE-NEXT: ret
+ ret <4 x i32> splat (i32 4092)
+}
+
define <8 x i16> @movi_v8i16_1() {
; NEON-LABEL: movi_v8i16_1:
; NEON: // %bb.0:
-; NEON-NEXT: adrp x8, .LCPI8_0
-; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI8_0]
+; NEON-NEXT: adrp x8, .LCPI9_0
+; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI9_0]
; NEON-NEXT: ret
;
; SVE-LABEL: movi_v8i16_1:
@@ -133,8 +147,8 @@ define <8 x i16> @movi_v8i16_1() {
define <8 x i16> @movi_v8i16_2() {
; NEON-LABEL: movi_v8i16_2:
; NEON: // %bb.0:
-; NEON-NEXT: adrp x8, .LCPI9_0
-; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI9_0]
+; NEON-NEXT: adrp x8, .LCPI10_0
+; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI10_0]
; NEON-NEXT: ret
;
; SVE-LABEL: movi_v8i16_2:
@@ -144,11 +158,25 @@ define <8 x i16> @movi_v8i16_2() {
ret <8 x i16> <i16 32512, i16 0, i16 0, i16 0, i16 32512, i16 0, i16 0, i16 0>
}
+define <8 x i16> @movi_v8i16_510() {
+; NEON-LABEL: movi_v8i16_510:
+; NEON: // %bb.0:
+; NEON-NEXT: mov w8, #510 // =0x1fe
+; NEON-NEXT: dup v0.8h, w8
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_v8i16_510:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.h, #510 // =0x1fe
+; SVE-NEXT: ret
+ ret <8 x i16> splat (i16 510)
+}
+
define <16 x i8> @movi_v16i8_1() {
; NEON-LABEL: movi_v16i8_1:
; NEON: // %bb.0:
-; NEON-NEXT: adrp x8, .LCPI10_0
-; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI10_0]
+; NEON-NEXT: adrp x8, .LCPI12_0
+; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI12_0]
; NEON-NEXT: ret
;
; SVE-LABEL: movi_v16i8_1:
@@ -161,8 +189,8 @@ define <16 x i8> @movi_v16i8_1() {
define <16 x i8> @movi_v16i8_2() {
; NEON-LABEL: movi_v16i8_2:
; NEON: // %bb.0:
-; NEON-NEXT: adrp x8, .LCPI11_0
-; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI11_0]
+; NEON-NEXT: adrp x8, .LCPI13_0
+; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI13_0]
; NEON-NEXT: ret
;
; SVE-LABEL: movi_v16i8_2:
@@ -175,20 +203,30 @@ define <16 x i8> @movi_v16i8_2() {
; Negative cases
define <2 x i64> @movi_128_v2i64() {
-; COMMON-LABEL: movi_128_v2i64:
-; COMMON: // %bb.0:
-; COMMON-NEXT: mov w8, #128 // =0x80
-; COMMON-NEXT: dup v0.2d, x8
-; COMMON-NEXT: ret
+; NEON-LABEL: movi_128_v2i64:
+; NEON: // %bb.0:
+; NEON-NEXT: mov w8, #128 // =0x80
+; NEON-NEXT: dup v0.2d, x8
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_128_v2i64:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #128 // =0x80
+; SVE-NEXT: ret
ret <2 x i64> splat (i64 128)
}
define <2 x i64> @movi_m127_v2i64() {
-; COMMON-LABEL: movi_m127_v2i64:
-; COMMON: // %bb.0:
-; COMMON-NEXT: mov x8, #-129 // =0xffffffffffffff7f
-; COMMON-NEXT: dup v0.2d, x8
-; COMMON-NEXT: ret
+; NEON-LABEL: movi_m127_v2i64:
+; NEON: // %bb.0:
+; NEON-NEXT: mov x8, #-129 // =0xffffffffffffff7f
+; NEON-NEXT: dup v0.2d, x8
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_m127_v2i64:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #-129 // =0xffffffffffffff7f
+; SVE-NEXT: ret
ret <2 x i64> splat (i64 -129)
}
@@ -202,11 +240,16 @@ define <2 x i64> @movi_32513_v2i64() {
}
define <2 x i64> @movi_m32769_v2i64() {
-; COMMON-LABEL: movi_m32769_v2i64:
-; COMMON: // %bb.0:
-; COMMON-NEXT: mov x8, #-32769 // =0xffffffffffff7fff
-; COMMON-NEXT: dup v0.2d, x8
-; COMMON-NEXT: ret
+; NEON-LABEL: movi_m32769_v2i64:
+; NEON: // %bb.0:
+; NEON-NEXT: mov x8, #-32769 // =0xffffffffffff7fff
+; NEON-NEXT: dup v0.2d, x8
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_m32769_v2i64:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #0xffffffffffff7fff
+; SVE-NEXT: ret
ret <2 x i64> splat (i64 -32769)
}
@@ -220,19 +263,29 @@ define <2 x i64> @movi_257_v2i64() {
}
define <4 x i32> @movi_v4i32_3() {
-; COMMON-LABEL: movi_v4i32_3:
-; COMMON: // %bb.0:
-; COMMON-NEXT: adrp x8, .LCPI17_0
-; COMMON-NEXT: ldr q0, [x8, :lo12:.LCPI17_0]
-; COMMON-NEXT: ret
+; NEON-LABEL: movi_v4i32_3:
+; NEON: // %bb.0:
+; NEON-NEXT: adrp x8, .LCPI19_0
+; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI19_0]
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_v4i32_3:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #0xffffff80
+; SVE-NEXT: ret
ret <4 x i32> <i32 -128, i32 0, i32 -128, i32 0>
}
define <16 x i8> @movi_v16i8_3() {
-; COMMON-LABEL: movi_v16i8_3:
-; COMMON: // %bb.0:
-; COMMON-NEXT: adrp x8, .LCPI18_0
-; COMMON-NEXT: ldr q0, [x8, :lo12:.LCPI18_0]
-; COMMON-NEXT: ret
+; NEON-LABEL: movi_v16i8_3:
+; NEON: // %bb.0:
+; NEON-NEXT: adrp x8, .LCPI20_0
+; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI20_0]
+; NEON-NEXT: ret
+;
+; SVE-LABEL: movi_v16i8_3:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #0x7f0000
+; SVE-NEXT: ret
ret <16 x i8> <i8 0, i8 0, i8 127, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 127, i8 0, i8 0, i8 0, i8 0, i8 0>
}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
index 37450431d8a11..e93f0d601746d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
@@ -260,10 +260,9 @@ define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0
define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
; CHECK-LABEL: test_copysign_v2f64_v2f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT: mov z0.d, #0x7fffffffffffffff
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
-; CHECK-NEXT: fneg v0.2d, v0.2d
; CHECK-NEXT: bsl v0.16b, v1.16b, v2.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
@@ -413,11 +412,10 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
; CHECK-LABEL: test_copysign_v2f64_v2f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff
; CHECK-NEXT: ldr d1, [x1]
+; CHECK-NEXT: mov z0.d, #0x7fffffffffffffff
; CHECK-NEXT: ldr q2, [x0]
; CHECK-NEXT: fcvtl v1.2d, v1.2s
-; CHECK-NEXT: fneg v0.2d, v0.2d
; CHECK-NEXT: bsl v0.16b, v2.16b, v1.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
index 98056fd1d0bf0..b8d07c69189e9 100644
--- a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
@@ -247,10 +247,9 @@ define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0
define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
; CHECK-LABEL: test_copysign_v2f64_v2f64:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT: mov z0.d, #0x7fffffffffffffff
; CHECK-NEXT: ldr q1, [x0]
; CHECK-NEXT: ldr q2, [x1]
-; CHECK-NEXT: fneg v0.2d, v0.2d
; CHECK-NEXT: bsl v0.16b, v1.16b, v2.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
@@ -393,11 +392,10 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
; CHECK-LABEL: test_copysign_v2f64_v2f32:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v0.2d, #0xffffffffffffffff
; CHECK-NEXT: ldr d1, [x1]
+; CHECK-NEXT: mov z0.d, #0x7fffffffffffffff
; CHECK-NEXT: ldr q2, [x0]
; CHECK-NEXT: fcvtl v1.2d, v1.2s
-; CHECK-NEXT: fneg v0.2d, v0.2d
; CHECK-NEXT: bsl v0.16b, v2.16b, v1.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
|
paulwalker-arm
approved these changes
Dec 22, 2025
valadaptive
pushed a commit
to valadaptive/llvm-project
that referenced
this pull request
Dec 24, 2025
Allow using SVE DUPM instructions to materialise fixed-length vectors. Fixes llvm#122422.
mahesh-attarde
pushed a commit
to mahesh-attarde/llvm-project
that referenced
this pull request
Jan 6, 2026
Allow using SVE DUPM instructions to materialise fixed-length vectors. Fixes llvm#122422.
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Allow using SVE DUPM instructions to materialise fixed-length vectors.
Fixes #122422.