AArch64: Convert tests to opaque pointers #167442
Merged: arsenm merged 1 commit into main from users/arsenm/aarch64/convert-test-opaque-pointers on Nov 11, 2025.
Conversation
@llvm/pr-subscribers-backend-aarch64

Author: Matt Arsenault (arsenm)

Changes: Patch is 30.10 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/167442.diff

11 Files Affected:
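For context, the change is purely mechanical: typed pointer types such as i32* or <8 x i8>* become the single opaque ptr type, and the value type lives only on the load, store, or getelementptr instruction. A minimal before/after sketch (the function name here is illustrative, not taken from the patch; the typed-pointer form is accepted only by older LLVM releases):

; Before: typed pointers spell the pointee type in the pointer type itself.
define i32 @load_example(i32* %p) {
  %v = load i32, i32* %p, align 4
  ret i32 %v
}

; After: ptr carries no pointee type; the load instruction alone
; states the loaded type.
define i32 @load_example(ptr %p) {
  %v = load i32, ptr %p, align 4
  ret i32 %v
}

Since only the IR spelling changes, the generated AArch64 code, and therefore the tests' CHECK lines, should be unaffected, which matches the diff below: it touches operand types only.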
diff --git a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
index a84d666c1be6b..d1bcad4724e48 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-reassociate-accumulators.ll
@@ -24,8 +24,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%vabd_ext = zext <8 x i8> %vabd to <8 x i16>
%acc_next = add <8 x i16> %vabd_ext, %acc_phi
@@ -65,8 +65,8 @@ loop:
%acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
- %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+ %a = load <4 x i16>, ptr %ptr1_i, align 1
+ %b = load <4 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
%vmov = zext <4 x i16> %vabd to <4 x i32>
%acc_next = add <4 x i32> %vmov, %acc_phi
@@ -116,8 +116,8 @@ loop:
%acc_phi_lo = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next_lo, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
- %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+ %a = load <16 x i8>, ptr %ptr1_i, align 1
+ %b = load <16 x i8>, ptr %ptr2_i, align 1
%a_hi = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%b_hi = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%a_lo = shufflevector <16 x i8> %a, <16 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -160,8 +160,8 @@ loop:
%acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
- %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
- %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+ %a = load <4 x i32>, ptr %ptr1_i, align 1
+ %b = load <4 x i32>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %a, <4 x i32> %b)
%acc_next = add <4 x i32> %acc_phi, %vabd
%next_i = add i32 %i, 4
@@ -198,8 +198,8 @@ loop:
; Load values from ptr1 and ptr2
%ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
- %a = load <4 x i32>, <4 x i32>* %ptr1_i, align 1
- %b = load <4 x i32>, <4 x i32>* %ptr2_i, align 1
+ %a = load <4 x i32>, ptr %ptr1_i, align 1
+ %b = load <4 x i32>, ptr %ptr2_i, align 1
; Perform the intrinsic operation
%vabd = tail call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %a, <4 x i32> %b)
%acc_next = add <4 x i32> %acc_phi, %vabd
@@ -237,8 +237,8 @@ loop:
%acc_phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i32, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i32, ptr %ptr2, i32 %i
- %a = load <2 x i32>, <2 x i32>* %ptr1_i, align 1
- %b = load <2 x i32>, <2 x i32>* %ptr2_i, align 1
+ %a = load <2 x i32>, ptr %ptr1_i, align 1
+ %b = load <2 x i32>, ptr %ptr2_i, align 1
%vabd = tail call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %a, <2 x i32> %b)
%acc_next = add <2 x i32> %acc_phi, %vabd
%next_i = add i32 %i, 2
@@ -272,8 +272,8 @@ loop:
%acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%acc_next = add <8 x i8> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -307,8 +307,8 @@ loop:
%acc_phi = phi <16 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <16 x i8>, <16 x i8>* %ptr1_i, align 1
- %b = load <16 x i8>, <16 x i8>* %ptr2_i, align 1
+ %a = load <16 x i8>, ptr %ptr1_i, align 1
+ %b = load <16 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %a, <16 x i8> %b)
%acc_next = add <16 x i8> %acc_phi, %vabd
%next_i = add i32 %i, 16
@@ -342,8 +342,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
- %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+ %a = load <8 x i16>, ptr %ptr1_i, align 1
+ %b = load <8 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %a, <8 x i16> %b)
%acc_next = add <8 x i16> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -377,8 +377,8 @@ loop:
%acc_phi = phi <8 x i8> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%acc_next = add <8 x i8> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -411,8 +411,8 @@ loop:
%acc_phi = phi <4 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
- %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+ %a = load <4 x i16>, ptr %ptr1_i, align 1
+ %b = load <4 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %a, <4 x i16> %b)
%acc_next = add <4 x i16> %acc_phi, %vabd
%next_i = add i32 %i, 4
@@ -445,8 +445,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <8 x i16>, <8 x i16>* %ptr1_i, align 1
- %b = load <8 x i16>, <8 x i16>* %ptr2_i, align 1
+ %a = load <8 x i16>, ptr %ptr1_i, align 1
+ %b = load <8 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %a, <8 x i16> %b)
%acc_next = add <8 x i16> %acc_phi, %vabd
%next_i = add i32 %i, 8
@@ -480,8 +480,8 @@ loop:
%acc_phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i8, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i8, ptr %ptr2, i32 %i
- %a = load <8 x i8>, <8 x i8>* %ptr1_i, align 1
- %b = load <8 x i8>, <8 x i8>* %ptr2_i, align 1
+ %a = load <8 x i8>, ptr %ptr1_i, align 1
+ %b = load <8 x i8>, ptr %ptr2_i, align 1
%vabd = tail call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %a, <8 x i8> %b)
%vmov = zext <8 x i8> %vabd to <8 x i16>
%acc_next = add <8 x i16> %vmov, %acc_phi
@@ -516,8 +516,8 @@ loop:
%acc_phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc_next, %loop ]
%ptr1_i = getelementptr i16, ptr %ptr1, i32 %i
%ptr2_i = getelementptr i16, ptr %ptr2, i32 %i
- %a = load <4 x i16>, <4 x i16>* %ptr1_i, align 1
- %b = load <4 x i16>, <4 x i16>* %ptr2_i, align 1
+ %a = load <4 x i16>, ptr %ptr1_i, align 1
+ %b = load <4 x i16>, ptr %ptr2_i, align 1
%vabd = tail call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %a, <4 x i16> %b)
%vmov = zext <4 x i16> %vabd to <4 x i32>
%acc_next = add <4 x i32> %vmov, %acc_phi
diff --git a/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll b/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll
index 608fe29e17398..d421b3f17caf8 100644
--- a/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll
+++ b/llvm/test/CodeGen/AArch64/cgdata-merge-local.ll
@@ -54,9 +54,9 @@
define i32 @f1(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
@@ -65,9 +65,9 @@ entry:
define i32 @f2(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g2, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
diff --git a/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll b/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll
index 10f0e10f11d66..a9da1253de01d 100644
--- a/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll
+++ b/llvm/test/CodeGen/AArch64/cgdata-merge-no-params.ll
@@ -19,9 +19,9 @@
define i32 @f1(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
@@ -30,9 +30,9 @@ entry:
define i32 @f2(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
diff --git a/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll b/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll
index 9986af7eb231c..7ab2aba8d75e2 100644
--- a/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll
+++ b/llvm/test/CodeGen/AArch64/cgdata-no-merge-unnamed.ll
@@ -12,9 +12,9 @@
define i32 @0(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g1, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g1, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
@@ -23,9 +23,9 @@ entry:
define i32 @1(i32 %a) {
entry:
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i64 0, i64 %idxprom
- %0 = load i32, i32* %arrayidx, align 4
- %1 = load volatile i32, i32* @g2, align 4
+ %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i64 0, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %1 = load volatile i32, ptr @g2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, 1
ret i32 %add
diff --git a/llvm/test/CodeGen/AArch64/divrem.ll b/llvm/test/CodeGen/AArch64/divrem.ll
index 5cd7e098d00bb..e3cbd17dc4c3f 100644
--- a/llvm/test/CodeGen/AArch64/divrem.ll
+++ b/llvm/test/CodeGen/AArch64/divrem.ll
@@ -2,7 +2,7 @@
; SDIVREM/UDIVREM DAG nodes are generated but expanded when lowering and
; should not generate select error.
-define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, < 2 x i32>* %z) {
+define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, ptr %z) {
; CHECK-LABEL: test_udivrem
; CHECK-DAG: udivrem
; CHECK-NOT: LLVM ERROR: Cannot select
@@ -12,10 +12,10 @@ define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, < 2 x i32>* %z) {
ret <2 x i32> %1
}
-define <4 x i32> @test_sdivrem(<4 x i32> %x, ptr %y) {
+define <4 x i32> @test_sdivrem(<4 x i32> %x, ptr %y) {
; CHECK-LABEL: test_sdivrem
; CHECK-DAG: sdivrem
- %div = sdiv <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
+ %div = sdiv <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
store <4 x i32> %div, ptr %y
%1 = srem <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
ret <4 x i32> %1
diff --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
index 91cf605613b9e..c0c8894ce1f6b 100644
--- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
+++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
@@ -85,7 +85,7 @@ define i64 @test_ldrsw_ldursw(ptr %p) #0 {
; CHECK-NEXT: add.2d v0, v[[V0]], v[[V1]]
; CHECK-NEXT: ret
define <2 x i64> @test_ldrq_ldruq_invalidoffset(ptr %p) #0 {
- %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+ %tmp1 = load <2 x i64>, ptr %p, align 8
%add.ptr2 = getelementptr inbounds i64, ptr %p, i64 3
%tmp2 = load <2 x i64>, ptr %add.ptr2, align 8
%add = add nsw <2 x i64> %tmp1, %tmp2
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir b/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir
index b7fbdc09c1dd1..a635231fef7fb 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-iterative.mir
@@ -6,9 +6,9 @@
#
#; define void @"$s12"(...) { define i64 @"$s5” (...) { define void @"$s13"(...) {
# ... ... ...
-# %8 = load i1, i1* %7 %8 = load i1, i1* %7
-# %9 = load i4, i4*, %6 %9 = load i4, i4*, %6 %9 = load i4, i4*, %6
-# store i4 %9, i4* %5 store i4 %9, i4* %5 store i4 %9, i4* %5
+# %8 = load i1, ptr %7 %8 = load i1, ptr %7
+# %9 = load i4, ptr, %6 %9 = load i4, ptr, %6 %9 = load i4, ptr, %6
+# store i4 %9, ptr %5 store i4 %9, ptr %5 store i4 %9, ptr %5
# ... ... ...
# } } }
#
@@ -16,7 +16,7 @@
#
# define void @"$s12"(...) { define i64 @"$s5” (...) { define void @"$s13"(...) {
# ... ... ...
-# %8 = load i1, i1* %7 %8 = load i1, i1* %7
+# %8 = load i1, ptr %7 %8 = load i1, ptr %7
# call void @outlined_function_1_1 call void @outlined_function_1_1 call void @outlined_function_1_1
# ... ... ...
# } } }
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll b/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll
index 700a060ef968f..0a10e80d998cd 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-cmp-bcc.ll
@@ -15,10 +15,10 @@
; RUN: llc %s -o - -O0 -mtriple=aarch64-unknown -mcpu=ampere1b | FileCheck %s
-define void @test_cmp_bcc_fusion(i32 %x, i32 %y, i32* %arr) {
+define void @test_cmp_bcc_fusion(i32 %x, i32 %y, ptr %arr) {
entry:
%cmp = icmp eq i32 %x, %y
- store i32 %x, i32* %arr, align 4
+ store i32 %x, ptr %arr, align 4
br i1 %cmp, label %if_true, label %if_false
if_true:
diff --git a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
index b7dde881291bb..1a85f803b9e57 100644
--- a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
+++ b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
@@ -19,7 +19,7 @@ define void @test_nopair_st(ptr %ptr, <2 x double> %v1, <2 x double> %v2) {
; SLOW-NOT: ldp
; FAST: ldp
define <2 x i64> @test_nopair_ld(ptr %p) {
- %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+ %tmp1 = load <2 x i64>, ptr %p, align 8
%add.ptr2 = getelementptr inbounds i64, ptr %p, i64 2
%tmp2 = load <2 x i64>, ptr %add.ptr2, align 8
%add = add nsw <2 x i64> %tmp1, %tmp2
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll b/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll
index 0356a46ec1050..df5e1a9f1ee10 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-bti-call.ll
@@ -17,7 +17,7 @@
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: braaz x16
-define i32 @test_tailcall_ia_0(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ia_0(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 0) ]
ret i32 %tmp0
}
@@ -26,7 +26,7 @@ define i32 @test_tailcall_ia_0(i32 ()* %arg0) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: brabz x16
-define i32 @test_tailcall_ib_0(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ib_0(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 0) ]
ret i32 %tmp0
}
@@ -36,7 +36,7 @@ define i32 @test_tailcall_ib_0(i32 ()* %arg0) #0 {
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: mov x17, #42
; CHECK-NEXT: braa x16, x17
-define i32 @test_tailcall_ia_imm(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ia_imm(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 42) ]
ret i32 %tmp0
}
@@ -46,7 +46,7 @@ define i32 @test_tailcall_ia_imm(i32 ()* %arg0) #0 {
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: mov x17, #42
; CHECK-NEXT: brab x16, x17
-define i32 @test_tailcall_ib_imm(i32 ()* %arg0) #0 {
+define i32 @test_tailcall_ib_imm(ptr %arg0) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 42) ]
ret i32 %tmp0
}
@@ -60,8 +60,8 @@ define i32 @test_tailcall_ib_imm(i32 ()* %arg0) #0 {
; ELF-NEXT: ldr x1, [x1]
; ELF-NEXT: mov x16, x0
; ELF-NEXT: braa x16, x1
-define i32 @test_tailcall_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
- %tmp0 = load i64, i64* %arg1
+define i32 @test_tailcall_ia_var(ptr %arg0, ptr %arg1) #0 {
+ %tmp0 = load i64, ptr %arg1
%tmp1 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 %tmp0) ]
ret i32 %tmp1
}
@@ -75,8 +75,8 @@ define i32 @test_tailcall_ia_var(i32 ()* %arg0, i64* %arg1) #0 {
; ELF-NEXT: ldr x1, [x1]
; ELF-NEXT: mov x16, x0
; ELF-NEXT: brab x16, x1
-define i32 @test_tailcall_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
- %tmp0 = load i64, i64* %arg1
+define i32 @test_tailcall_ib_var(ptr %arg0, ptr %arg1) #0 {
+ %tmp0 = load i64, ptr %arg1
%tmp1 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 %tmp0) ]
ret i32 %tmp1
}
@@ -85,7 +85,7 @@ define i32 @test_tailcall_ib_var(i32 ()* %arg0, i64* %arg1) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: braa x16, x1
-define i32 @test_tailcall_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ia_arg(ptr %arg0, i64 %arg1) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 0, i64 %arg1) ]
ret i32 %tmp0
}
@@ -94,7 +94,7 @@ define i32 @test_tailcall_ia_arg(i32 ()* %arg0, i64 %arg1) #0 {
; CHECK-NEXT: bti c
; CHECK-NEXT: mov x16, x0
; CHECK-NEXT: brab x16, x1
-define i32 @test_tailcall_ib_arg(i32 ()* %arg0, i64 %arg1) #0 {
+define i32 @test_tailcall_ib_arg(ptr %arg0, i64 %arg1) #0 {
%tmp0 = tail call i32 %arg0() [ "ptrauth"(i32 1, i64 %arg1) ]
ret i32 %tmp0
}
@@ -103,8 +103,8 @@ define ...
[truncated]
Force-pushed 04a616e to 95bec2e.
davemgreen (Collaborator) reviewed on Nov 11, 2025:
I didn't think we had any of these left!