
Conversation

RKSimon (Collaborator) commented Sep 18, 2025

No description provided.

RKSimon enabled auto-merge (squash) September 18, 2025 08:58
llvmbot (Member) commented Sep 18, 2025

@llvm/pr-subscribers-backend-amdgpu

Author: Simon Pilgrim (RKSimon)

Changes

Patch is 29.44 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/159526.diff

1 file affected:

  • (modified) llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll (+445-111)
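
The new assertions carry a UTC_ARGS header (visible in the diff below), indicating they were produced by LLVM's update_llc_test_checks.py script rather than written by hand. As a rough sketch of how such checks are regenerated, a typical invocation from the llvm-project root would look something like the following; the build directory path is illustrative, not taken from this PR:

    # Regenerate the autogenerated CHECK lines in place, using a locally built llc.
    # The script parses the test's RUN lines, runs llc on the IR, and rewrites the checks.
    llvm/utils/update_llc_test_checks.py \
        --llc-binary build/bin/llc \
        llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll
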
diff --git a/llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll b/llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll
index 9601162685f2c..2fa865ff4929c 100644
--- a/llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll
+++ b/llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll
@@ -1,89 +1,200 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx900 -amdgpu-ir-lower-kernel-arguments=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,HSA-VI,FUNC %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx900 -amdgpu-ir-lower-kernel-arguments=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s
 
 ; Repeat of some problematic tests in kernel-args.ll, with the IR
 ; argument lowering pass disabled. Struct padding needs to be
 ; accounted for, as well as legalization of types changing offsets.
 
-; FUNC-LABEL: {{^}}i1_arg:
-
-; GCN: s_load_dword s
-; GCN: s_and_b32
-
-; HSA-VI: .amdhsa_kernarg_size 12
 define amdgpu_kernel void @i1_arg(ptr addrspace(1) %out, i1 %x) #0 {
+; GCN-LABEL: i1_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s2, s2, 1
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    global_store_byte v0, v1, s[0:1]
+; GCN-NEXT:    s_endpgm
   store i1 %x, ptr addrspace(1) %out, align 1
   ret void
 }
+; GCN: .amdhsa_kernarg_size 12
 
-; FUNC-LABEL: {{^}}v3i8_arg:
-
-; HSA-VI: s_load_dword s{{[0-9]+}}, s[8:9], 0x8
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x0
-
-; HSA-VI: .amdhsa_kernarg_size 12
 define amdgpu_kernel void @v3i8_arg(ptr addrspace(1) nocapture %out, <3 x i8> %in) #0 {
+; GCN-LABEL: v3i8_arg:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    global_store_byte_d16_hi v0, v1, s[0:1] offset:2
+; GCN-NEXT:    global_store_short v0, v1, s[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   store <3 x i8> %in, ptr addrspace(1) %out, align 4
   ret void
 }
+; GCN: .amdhsa_kernarg_size 12
 
-; FUNC-LABEL: {{^}}v5i8_arg:
-; GCN: s_load_dwordx2 s[0:1], s[8:9], 0x0
 define amdgpu_kernel void @v5i8_arg(<5 x i8> %in) nounwind {
+; GCN-LABEL: v5i8_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 4
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s3, s0, 24
+; GCN-NEXT:    s_and_b32 s2, s0, 0xffff
+; GCN-NEXT:    s_bfe_u32 s0, s0, 0x80010
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    s_lshl_b32 s1, s3, 8
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_lshl_b32 s0, s0, 16
+; GCN-NEXT:    s_or_b32 s0, s2, s0
+; GCN-NEXT:    global_store_byte v[0:1], v2, off
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    global_store_dword v[0:1], v2, off
+; GCN-NEXT:    s_endpgm
   store <5 x i8> %in, ptr addrspace(1) null
   ret void
 }
 
-; FUNC-LABEL: {{^}}v6i8_arg:
-; GCN: s_load_dwordx2 s[0:1], s[8:9], 0x0
 define amdgpu_kernel void @v6i8_arg(<6 x i8> %in) nounwind {
+; GCN-LABEL: v6i8_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 4
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s3, s0, 24
+; GCN-NEXT:    s_and_b32 s2, s0, 0xffff
+; GCN-NEXT:    s_bfe_u32 s0, s0, 0x80010
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    s_lshl_b32 s1, s3, 8
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_lshl_b32 s0, s0, 16
+; GCN-NEXT:    s_or_b32 s0, s2, s0
+; GCN-NEXT:    global_store_short v[0:1], v2, off
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    global_store_dword v[0:1], v2, off
+; GCN-NEXT:    s_endpgm
   store <6 x i8> %in, ptr addrspace(1) null
   ret void
 }
 
-; FUNC-LABEL: {{^}}v5i16_arg:
-; GCN: s_load_dwordx4 s[0:3], s[8:9], 0x0
 define amdgpu_kernel void @v5i16_arg(<5 x i16> %in) nounwind {
+; GCN-LABEL: v5i16_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 8
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v6, s2
+; GCN-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-NEXT:    global_store_short v[0:1], v6, off
+; GCN-NEXT:    global_store_dwordx2 v[2:3], v[4:5], off
+; GCN-NEXT:    s_endpgm
   store <5 x i16> %in, ptr addrspace(1) null
   ret void
 }
 
-; FUNC-LABEL: {{^}}v6i16_arg:
-; GCN-DAG: s_load_dwordx4 s[0:3], s[8:9], 0x0
 define amdgpu_kernel void @v6i16_arg(<6 x i16> %in) nounwind {
+; GCN-LABEL: v6i16_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    global_store_dwordx3 v[3:4], v[0:2], off
+; GCN-NEXT:    s_endpgm
   store <6 x i16> %in, ptr addrspace(1) null
   ret void
 }
 
-; FUNC-LABEL: {{^}}v5i32_arg:
-; GCN: s_load_dwordx4 s[0:3], s[8:9], 0x0
 define amdgpu_kernel void @v5i32_arg(<5 x i32> %in) nounwind {
+; GCN-LABEL: v5i32_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s4, s[8:9], 0x10
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v4, 16
+; GCN-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v8, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    global_store_dword v[4:5], v8, off
+; GCN-NEXT:    global_store_dwordx4 v[6:7], v[0:3], off
+; GCN-NEXT:    s_endpgm
   store <5 x i32> %in, ptr addrspace(1) null
   ret void
 }
 
-; FUNC-LABEL: {{^}}v6i32_arg:
-; GCN: s_load_dwordx4 s[0:3], s[8:9], 0x0
 define amdgpu_kernel void @v6i32_arg(<6 x i32> %in) nounwind {
+; GCN-LABEL: v6i32_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[8:9], 0x10
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v4, 16
+; GCN-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v9, s5
+; GCN-NEXT:    v_mov_b32_e32 v8, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    global_store_dwordx2 v[4:5], v[8:9], off
+; GCN-NEXT:    global_store_dwordx4 v[6:7], v[0:3], off
+; GCN-NEXT:    s_endpgm
   store <6 x i32> %in, ptr addrspace(1) null
   ret void
 }
 
-; FUNC-LABEL: {{^}}i65_arg:
-; HSA-VI: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x0
-
-; HSA-VI: .amdhsa_kernarg_size 24
 define amdgpu_kernel void @i65_arg(ptr addrspace(1) nocapture %out, i65 %in) #0 {
+; GCN-LABEL: i65_arg:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[8:9], 0x10
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s4, s4, 1
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    global_store_byte v2, v3, s[0:1] offset:8
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   store i65 %in, ptr addrspace(1) %out, align 4
   ret void
 }
+; GCN: .amdhsa_kernarg_size 24
 
-; FUNC-LABEL: {{^}}empty_struct_arg:
-; HSA-VI: .amdhsa_kernarg_size 0
 define amdgpu_kernel void @empty_struct_arg({} %in) #0 {
+; GCN-LABEL: empty_struct_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_endpgm
   ret void
 }
+; GCN: .amdhsa_kernarg_size 0
 
 ; The correct load offsets for these:
 ; load 4 from 0,
@@ -95,14 +206,31 @@ define amdgpu_kernel void @empty_struct_arg({} %in) #0 {
 ; struct members is not properly considered, making these wrong.
 
 ; FIXME: Total argument size is computed wrong
-; FUNC-LABEL: {{^}}struct_argument_alignment:
-; HSA-VI: s_load_dword s{{[0-9]+}}, s[8:9], 0x0
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x8
-; HSA-VI: s_load_dword s{{[0-9]+}}, s[8:9], 0x18
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x20
-
-; HSA-VI: .amdhsa_kernarg_size 40
 define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32, i64} %arg1) #0 {
+; GCN-LABEL: struct_argument_alignment:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s4, s[8:9], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x8
+; GCN-NEXT:    s_load_dword s5, s[8:9], 0x18
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[8:9], 0x20
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    global_store_dword v[0:1], v2, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-NEXT:    global_store_dword v[0:1], v2, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_endpgm
   %val0 = extractvalue {i32, i64} %arg0, 0
   %val1 = extractvalue {i32, i64} %arg0, 1
   %val2 = extractvalue {i32, i64} %arg1, 0
@@ -113,18 +241,33 @@ define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32,
   store volatile i64 %val3, ptr addrspace(1) null
   ret void
 }
+; GCN: .amdhsa_kernarg_size 40
 
 ; No padding between i8 and next struct, but round up at end to 4 byte
 ; multiple.
-; FUNC-LABEL: {{^}}packed_struct_argument_alignment:
-; HSA-VI-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
-; HSA-VI: global_load_dword v{{[0-9]+}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}} offset:13
-; HSA-VI: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}} offset:17
-; HSA-VI: s_load_dword s{{[0-9]+}}, s[8:9], 0x0
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x4
-
-; HSA-VI: .amdhsa_kernarg_size 28
 define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0, i8, <{i32, i64}> %arg1) #0 {
+; GCN-LABEL: packed_struct_argument_alignment:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    global_load_dword v6, v2, s[8:9] offset:13
+; GCN-NEXT:    global_load_dwordx2 v[0:1], v2, s[8:9] offset:17
+; GCN-NEXT:    s_load_dword s2, s[8:9], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x4
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v7, s2
+; GCN-NEXT:    v_mov_b32_e32 v5, s1
+; GCN-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-NEXT:    global_store_dword v[2:3], v7, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_dwordx2 v[2:3], v[4:5], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_dword v[2:3], v6, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_dwordx2 v[2:3], v[0:1], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_endpgm
   %val0 = extractvalue <{i32, i64}> %arg0, 0
   %val1 = extractvalue <{i32, i64}> %arg0, 1
   %val2 = extractvalue <{i32, i64}> %arg1, 0
@@ -135,16 +278,40 @@ define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0,
   store volatile i64 %val3, ptr addrspace(1) null
   ret void
 }
+; GCN: .amdhsa_kernarg_size 28
 
-; GCN-LABEL: {{^}}struct_argument_alignment_after:
-; HSA-VI: s_load_dword s{{[0-9]+}}, s[8:9], 0x0
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x8
-; HSA-VI: s_load_dword s{{[0-9]+}}, s[8:9], 0x18
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x20
-; HSA-VI: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x30
-
-; HSA-VI: .amdhsa_kernarg_size 64
 define amdgpu_kernel void @struct_argument_alignment_after({i32, i64} %arg0, i8, {i32, i64} %arg2, i8, <4 x i32> %arg4) #0 {
+; GCN-LABEL: struct_argument_alignment_after:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s10, s[8:9], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[8:9], 0x8
+; GCN-NEXT:    s_load_dword s11, s[8:9], 0x18
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[8:9], 0x20
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x30
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NEXT:    global_store_dword v[4:5], v0, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    global_store_dword v[4:5], v0, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    global_store_dwordx4 v[4:5], v[0:3], off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_endpgm
   %val0 = extractvalue {i32, i64} %arg0, 0
   %val1 = extractvalue {i32, i64} %arg0, 1
   %val2 = extractvalue {i32, i64} %arg2, 0
@@ -156,137 +323,287 @@ define amdgpu_kernel void @struct_argument_alignment_after({i32, i64} %arg0, i8,
   store volatile <4 x i32> %arg4, ptr addrspace(1) null
   ret void
 }
+; GCN: .amdhsa_kernarg_size 64
 
-; GCN-LABEL: {{^}}array_3xi32:
-; HSA-VI: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x0
 define amdgpu_kernel void @array_3xi32(i16 %arg0, [3 x i32] %arg1) {
+; GCN-LABEL: array_3xi32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    global_store_short v[0:1], v0, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_dword v[0:1], v1, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_dword v[0:1], v2, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    global_store_dword v[0:1], v0, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_endpgm
   store volatile i16 %arg0, ptr addrspace(1) poison
   store volatile [3 x i32] %arg1, ptr addrspace(1) poison
   ret void
 }
 
-; GCN-LABEL: {{^}}array_3xi16:
-; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[8:9], 0x0
 define amdgpu_kernel void @array_3xi16(i8 %arg0, [3 x i16] %arg1) {
+; GCN-LABEL: array_3xi16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_byte v[0:1], v0, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_short_d16_hi v[0:1], v1, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_short v[0:1], v1, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    global_store_short_d16_hi v[0:1], v0, off
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_endpgm
   store volatile i8 %arg0, ptr addrspace(1) poison
   store volatile [3 x i16] %arg1, ptr addrspace(1) poison
   ret void
 }
 
-; GCN-LABEL: {{^}}v2i15_arg:
-; GCN: s_load_dword [[DWORD:s[0-9]+]]
-; GCN-DAG: s_bfe_u32 [[BFE:s[0-9]+]], [[DWORD]], 0x100010{{$}}
-; GCN-DAG: s_and_b32 [[AND:s[0-9]+]], [[DWORD]], 0x7fff{{$}}
 define amdgpu_kernel void @v2i15_arg(ptr addrspace(1) nocapture %out, <2 x i15> %in) #0 {
+; GCN-LABEL: v2i15_arg:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s3, s2, 0x7fff
+; GCN-NEXT:    s_bfe_u32 s2, s2, 0x100010
+; GCN-NEXT:    s_lshl_b32 s2, s2, 15
+; GCN-NEXT:    s_or_b32 s2, s3, s2
+; GCN-NEXT:    s_andn2_b32 s2, s2, -2.0
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    global_store_dword v0, v1, s[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   store <2 x i15> %in, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; GCN-LABEL: {{^}}v3i15_arg:
-; GCN: s_load_dwordx4 [[DWORDX4:s\[[0-9]+:[0-9]+\]]]
-; GCN: s_lshl_b64
-; GCN: s_and_b32
-; GCN: s_and_b32
-; GCN: s_or_b32
 define amdgpu_kernel void @v3i15_arg(ptr addrspace(1) nocapture %out, <3 x i15> %in) #0 {
+; GCN-LABEL: v3i15_arg:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s4, s3, 0xffff
+; GCN-NEXT:    s_and_b32 s5, s2, 0x7fff
+; GCN-NEXT:    s_lshr_b32 s6, s2, 1
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[4:5], 30
+; GCN-NEXT:    s_and_b32 s4, s6, 0x3fff8000
+; GCN-NEXT:    s_and_b32 s6, s3, 0x1fff
+; GCN-NEXT:    s_or_b32 s4, s5, s4
+; GCN-NEXT:    s_mov_b32 s5, 0
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    s_or_b64 s[2:3], s[4:5], s[2:3]
+; GCN-NEXT:    global_store_short v0, v1, s[0:1] offset:4
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    global_store_dword v0, v1, s[0:1]
+; GCN-NEXT:    s_endpgm
 entry:
   store <3 x i15> %in, ptr addrspace(1) %out, align 4
   ret void
 }
 
 ; Byref pointers should only be treated as offsets from kernarg
-; GCN-LABEL: {{^}}byref_constant_i8_arg:
-; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
-; GCN: global_load_ubyte v{{[0-9]+}}, [[ZERO]], s[8:9] offset:8
-; GCN: .amdhsa_kernarg_size 12
 define amdgpu_kernel void @byref_constant_i8_arg(ptr addrspace(1) nocapture %out, ptr addrspace(4) byref(i8) %in.byref) #0 {
+; GCN-LABEL: byref_constant_i8_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    global_load_ubyte v1, v0, s[8:9] offset:8
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT:    global_store_dword v0, v1, s[0:1]
+; GCN-NEXT:    s_endpgm
   %in = load i8, ptr addrspace(4) %in.byref
   %ext = zext i8 %in to i32
   store i32 %ext, ptr addrspace(1) %out, align 4
   ret void
 }
-
-; GCN-LABEL: {{^}}byref_constant_i16_arg:
-; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
-; GCN: global_load_ushort v{{[0-9]+}}, [[ZERO]], s[8:9] offset:8
 ; GCN: .amdhsa_kernarg_size 12
+
+
 define amdgpu_kernel void @byref_constant_i16_arg(ptr addrspace(1) nocapture %out, ptr addrspace(4) byref(i16) %in.byref) #0 {
+; GCN-LABEL: byref_constant_i16_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    global_load_ushort v1, v0, s[8:9] offset:8
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT:    global_store_dword v0, v1, s[0:1]
+; GCN-NEXT:    s_endpgm
   %in = load i16, ptr addrspace(4) %in.byref
   %ext = zext i16 %in to i32
   store i32 %ext, ptr addrspace(1) %out, align 4
   ret void
 }
+; GCN: .amdhsa_kernarg_size 12
 
-; GCN-LABEL: {{^}}byref_constant_i32_arg:
-; GCN: s_load_dwordx4 [[LOAD:s\[[0-9]+:[0-9]+\]]], s[8:9], 0x0{{$}}
-; GCN: .amdhsa_kernarg_size 16
 define amdgpu_kernel void @byref_constant_i32_arg(ptr addrspace(1) nocapture %out, ptr addrspace(4) byref(i32) %in.byref, i32 %after.offset) #0 {
+; GCN-LABEL: byref_constant_i32_arg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    global_store_dword v0, v1, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcn...
[truncated]

RKSimon merged commit 8552760 into llvm:main Sep 18, 2025
11 checks passed
RKSimon deleted the amdgpu-kernel-test-regen branch September 18, 2025 10:52