
arsenm (Contributor) commented Nov 21, 2025

The sub-dword cases just assert now, so comment those out.
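
Here "sub-dword" means loads narrower than 32 bits (i8, i16, and their vectors) from the 32-bit constant address space, addrspace(6). As a rough sketch modeled on the commented-out tests in the diff below (hypothetical function name, not part of the patch), a case of this shape currently hits the assertion during selection:

define amdgpu_vs float @load_i8_repro(ptr addrspace(6) inreg %p) {
  ; Sub-dword (8-bit) load through a 32-bit constant pointer.
  %ld = load i8, ptr addrspace(6) %p
  ; Widen to a dword and bitcast so the shader can return it as a float.
  %ext = zext i8 %ld to i32
  %cast = bitcast i32 %ext to float
  ret float %cast
}

The tests of this shape are left commented out so they can be re-enabled once sub-dword loads from addrspace(6) select without asserting.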

llvmbot (Member) commented Nov 21, 2025

@llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

Changes

The sub-dword cases just assert now, so comment those out.


Patch is 65.94 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/168976.diff

1 file affected:

  • (modified) llvm/test/CodeGen/AMDGPU/constant-address-space-32bit.ll (+1560-19)
diff --git a/llvm/test/CodeGen/AMDGPU/constant-address-space-32bit.ll b/llvm/test/CodeGen/AMDGPU/constant-address-space-32bit.ll
index 14056257665b4..6a241dfa463bd 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-address-space-32bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/constant-address-space-32bit.ll
@@ -312,8 +312,8 @@ define amdgpu_vs <16 x float> @load_v16i32(ptr addrspace(6) inreg %p0, ptr addrs
   ret <16 x float> %r2
 }
 
-define amdgpu_vs float @load_float(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
-; GFX67-LABEL: load_float:
+define amdgpu_vs float @load_f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_f32:
 ; GFX67:       ; %bb.0:
 ; GFX67-NEXT:    s_mov_b32 s2, s1
 ; GFX67-NEXT:    s_mov_b32 s3, 0
@@ -325,7 +325,7 @@ define amdgpu_vs float @load_float(ptr addrspace(6) inreg %p0, ptr addrspace(6)
 ; GFX67-NEXT:    v_add_f32_e32 v0, s0, v0
 ; GFX67-NEXT:    ; return to shader part epilog
 ;
-; GFX8-LABEL: load_float:
+; GFX8-LABEL: load_f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_mov_b32 s2, s1
 ; GFX8-NEXT:    s_mov_b32 s3, 0
@@ -337,7 +337,7 @@ define amdgpu_vs float @load_float(ptr addrspace(6) inreg %p0, ptr addrspace(6)
 ; GFX8-NEXT:    v_add_f32_e32 v0, s0, v0
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
-; GFX9-LABEL: load_float:
+; GFX9-LABEL: load_f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_mov_b32 s2, s1
 ; GFX9-NEXT:    s_mov_b32 s3, 0
@@ -355,8 +355,8 @@ define amdgpu_vs float @load_float(ptr addrspace(6) inreg %p0, ptr addrspace(6)
   ret float %r
 }
 
-define amdgpu_vs <2 x float> @load_v2float(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
-; GFX67-LABEL: load_v2float:
+define amdgpu_vs <2 x float> @load_v2f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v2f32:
 ; GFX67:       ; %bb.0:
 ; GFX67-NEXT:    s_mov_b32 s3, 0
 ; GFX67-NEXT:    s_mov_b32 s2, s1
@@ -370,7 +370,7 @@ define amdgpu_vs <2 x float> @load_v2float(ptr addrspace(6) inreg %p0, ptr addrs
 ; GFX67-NEXT:    v_add_f32_e32 v1, s1, v1
 ; GFX67-NEXT:    ; return to shader part epilog
 ;
-; GFX8-LABEL: load_v2float:
+; GFX8-LABEL: load_v2f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_mov_b32 s3, 0
 ; GFX8-NEXT:    s_mov_b32 s2, s1
@@ -384,7 +384,7 @@ define amdgpu_vs <2 x float> @load_v2float(ptr addrspace(6) inreg %p0, ptr addrs
 ; GFX8-NEXT:    v_add_f32_e32 v1, s1, v1
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
-; GFX9-LABEL: load_v2float:
+; GFX9-LABEL: load_v2f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_mov_b32 s2, s1
 ; GFX9-NEXT:    s_mov_b32 s3, 0
@@ -404,8 +404,8 @@ define amdgpu_vs <2 x float> @load_v2float(ptr addrspace(6) inreg %p0, ptr addrs
   ret <2 x float> %r
 }
 
-define amdgpu_vs <4 x float> @load_v4float(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
-; GFX67-LABEL: load_v4float:
+define amdgpu_vs <4 x float> @load_v4f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v4f32:
 ; GFX67:       ; %bb.0:
 ; GFX67-NEXT:    s_mov_b32 s3, 0
 ; GFX67-NEXT:    s_mov_b32 s2, s1
@@ -423,7 +423,7 @@ define amdgpu_vs <4 x float> @load_v4float(ptr addrspace(6) inreg %p0, ptr addrs
 ; GFX67-NEXT:    v_add_f32_e32 v3, s3, v3
 ; GFX67-NEXT:    ; return to shader part epilog
 ;
-; GFX8-LABEL: load_v4float:
+; GFX8-LABEL: load_v4f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_mov_b32 s3, 0
 ; GFX8-NEXT:    s_mov_b32 s2, s1
@@ -441,7 +441,7 @@ define amdgpu_vs <4 x float> @load_v4float(ptr addrspace(6) inreg %p0, ptr addrs
 ; GFX8-NEXT:    v_add_f32_e32 v3, s3, v3
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
-; GFX9-LABEL: load_v4float:
+; GFX9-LABEL: load_v4f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_mov_b32 s2, s1
 ; GFX9-NEXT:    s_mov_b32 s3, 0
@@ -465,8 +465,8 @@ define amdgpu_vs <4 x float> @load_v4float(ptr addrspace(6) inreg %p0, ptr addrs
   ret <4 x float> %r
 }
 
-define amdgpu_vs <8 x float> @load_v8float(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
-; GFX67-LABEL: load_v8float:
+define amdgpu_vs <8 x float> @load_v8f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v8f32:
 ; GFX67:       ; %bb.0:
 ; GFX67-NEXT:    s_mov_b32 s2, s1
 ; GFX67-NEXT:    s_mov_b32 s3, 0
@@ -492,7 +492,7 @@ define amdgpu_vs <8 x float> @load_v8float(ptr addrspace(6) inreg %p0, ptr addrs
 ; GFX67-NEXT:    v_add_f32_e32 v7, s19, v7
 ; GFX67-NEXT:    ; return to shader part epilog
 ;
-; GFX89-LABEL: load_v8float:
+; GFX89-LABEL: load_v8f32:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_mov_b32 s2, s1
 ; GFX89-NEXT:    s_mov_b32 s3, 0
@@ -524,8 +524,8 @@ define amdgpu_vs <8 x float> @load_v8float(ptr addrspace(6) inreg %p0, ptr addrs
   ret <8 x float> %r
 }
 
-define amdgpu_vs <16 x float> @load_v16float(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
-; GFX67-LABEL: load_v16float:
+define amdgpu_vs <16 x float> @load_v16f32(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v16f32:
 ; GFX67:       ; %bb.0:
 ; GFX67-NEXT:    s_mov_b32 s3, 0
 ; GFX67-NEXT:    s_mov_b32 s2, s1
@@ -567,7 +567,7 @@ define amdgpu_vs <16 x float> @load_v16float(ptr addrspace(6) inreg %p0, ptr add
 ; GFX67-NEXT:    v_add_f32_e32 v15, s15, v15
 ; GFX67-NEXT:    ; return to shader part epilog
 ;
-; GFX8-LABEL: load_v16float:
+; GFX8-LABEL: load_v16f32:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_mov_b32 s3, 0
 ; GFX8-NEXT:    s_mov_b32 s2, s1
@@ -609,7 +609,7 @@ define amdgpu_vs <16 x float> @load_v16float(ptr addrspace(6) inreg %p0, ptr add
 ; GFX8-NEXT:    v_add_f32_e32 v15, s15, v15
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
-; GFX9-LABEL: load_v16float:
+; GFX9-LABEL: load_v16f32:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_mov_b32 s2, s1
 ; GFX9-NEXT:    s_mov_b32 s3, 0
@@ -993,6 +993,1547 @@ main_body:
   ret float %tmp10
 }
 
+; define amdgpu_vs float @load_i8(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+;   %gep1 = getelementptr inbounds i8, ptr addrspace(6) %p1, i32 2
+;   %r0 = load i8, ptr addrspace(6) %p0
+;   %r1 = load i8, ptr addrspace(6) %gep1
+;   %r = add i8 %r0, %r1
+;   %ext = zext i8 %r to i32
+;   %r2 = bitcast i32 %ext to float
+;   ret float %r2
+; }
+
+; define amdgpu_vs float @zextload_i8(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+;   %gep1 = getelementptr inbounds i8, ptr addrspace(6) %p1, i32 2
+;   %r0 = load i8, ptr addrspace(6) %p0
+;   %r1 = load i8, ptr addrspace(6) %gep1
+;   %zext.r0 = zext i8 %r0 to i32
+;   %zext.r1 = zext i8 %r1 to i32
+;   %r = add i32 %zext.r0, %zext.r1
+;   %r2 = bitcast i32 %r to float
+;   ret float %r2
+; }
+
+; define amdgpu_vs float @sextload_i8(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+;   %gep1 = getelementptr inbounds i8, ptr addrspace(6) %p1, i32 2
+;   %r0 = load i8, ptr addrspace(6) %p0
+;   %r1 = load i8, ptr addrspace(6) %gep1
+;   %zext.r0 = sext i8 %r0 to i32
+;   %zext.r1 = sext i8 %r1 to i32
+;   %r = add i32 %zext.r0, %zext.r1
+;   %r2 = bitcast i32 %r to float
+;   ret float %r2
+; }
+
+; define amdgpu_vs half @load_i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+;   %gep1 = getelementptr inbounds i16, ptr addrspace(6) %p1, i32 2
+;   %r0 = load i16, ptr addrspace(6) %p0
+;   %r1 = load i16, ptr addrspace(6) %gep1
+;   %r = add i16 %r0, %r1
+;   %r2 = bitcast i16 %r to half
+;   ret half %r2
+; }
+
+; define amdgpu_vs half @load_i16_align4(ptr addrspace(6) inreg %ptr) #0 {
+;   %gep1 = getelementptr inbounds i16, ptr addrspace(6) %ptr, i32 2
+;   %ld = load i16, ptr addrspace(6) %gep1
+;   %cast = bitcast i16 %ld to half
+;   ret half %cast
+; }
+
+define amdgpu_vs <2 x half> @load_v2i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v2i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_mov_b32 s2, s1
+; GFX67-NEXT:    s_mov_b32 s3, 0
+; GFX67-NEXT:    s_mov_b32 s1, s3
+; GFX67-NEXT:    s_load_dword s2, s[2:3], 0x2
+; GFX67-NEXT:    s_load_dword s0, s[0:1], 0x0
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_lshr_b32 s1, s2, 16
+; GFX67-NEXT:    s_lshr_b32 s3, s0, 16
+; GFX67-NEXT:    s_add_i32 s3, s3, s1
+; GFX67-NEXT:    s_add_i32 s0, s0, s2
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v0, s0
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v1, s3
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: load_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s2, s1
+; GFX8-NEXT:    s_mov_b32 s3, 0
+; GFX8-NEXT:    s_mov_b32 s1, s3
+; GFX8-NEXT:    s_load_dword s2, s[2:3], 0x8
+; GFX8-NEXT:    s_load_dword s0, s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshr_b32 s1, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s3, s0, 16
+; GFX8-NEXT:    s_add_i32 s3, s3, s1
+; GFX8-NEXT:    s_add_i32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s1, s3, 16
+; GFX8-NEXT:    s_or_b32 s0, s0, s1
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: load_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s1
+; GFX9-NEXT:    s_mov_b32 s3, 0
+; GFX9-NEXT:    s_mov_b32 s1, s3
+; GFX9-NEXT:    s_load_dword s4, s[2:3], 0x8
+; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_pk_add_u16 v0, s5, v0
+; GFX9-NEXT:    ; return to shader part epilog
+  %gep1 = getelementptr inbounds <2 x i16>, ptr addrspace(6) %p1, i32 2
+  %r0 = load <2 x i16>, ptr addrspace(6) %p0
+  %r1 = load <2 x i16>, ptr addrspace(6) %gep1
+  %r = add <2 x i16> %r0, %r1
+  %r2 = bitcast <2 x i16> %r to <2 x half>
+  ret <2 x half> %r2
+}
+
+define amdgpu_vs <3 x half> @load_v3i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v3i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_mov_b32 s3, 0
+; GFX67-NEXT:    s_mov_b32 s2, s1
+; GFX67-NEXT:    s_mov_b32 s1, s3
+; GFX67-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x4
+; GFX67-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX67-NEXT:    s_lshr_b32 s5, s0, 16
+; GFX67-NEXT:    s_add_i32 s5, s5, s4
+; GFX67-NEXT:    s_add_i32 s1, s1, s3
+; GFX67-NEXT:    s_add_i32 s0, s0, s2
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v0, s0
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v1, s5
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v2, s1
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: load_v3i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s3, 0
+; GFX8-NEXT:    s_mov_b32 s2, s1
+; GFX8-NEXT:    s_mov_b32 s1, s3
+; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x10
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshr_b32 s4, s2, 16
+; GFX8-NEXT:    s_add_i32 s1, s1, s3
+; GFX8-NEXT:    s_lshr_b32 s3, s0, 16
+; GFX8-NEXT:    s_add_i32 s3, s3, s4
+; GFX8-NEXT:    s_add_i32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s2, s3, 16
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: load_v3i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s1
+; GFX9-NEXT:    s_mov_b32 s3, 0
+; GFX9-NEXT:    s_mov_b32 s1, s3
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x10
+; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_pk_add_u16 v0, s6, v0
+; GFX9-NEXT:    v_pk_add_u16 v1, s7, v1
+; GFX9-NEXT:    ; return to shader part epilog
+  %gep1 = getelementptr inbounds <3 x i16>, ptr addrspace(6) %p1, i32 2
+  %r0 = load <3 x i16>, ptr addrspace(6) %p0
+  %r1 = load <3 x i16>, ptr addrspace(6) %gep1
+  %r = add <3 x i16> %r0, %r1
+  %r2 = bitcast <3 x i16> %r to <3 x half>
+  ret <3 x half> %r2
+}
+
+define amdgpu_vs <4 x half> @load_v4i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v4i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_mov_b32 s3, 0
+; GFX67-NEXT:    s_mov_b32 s2, s1
+; GFX67-NEXT:    s_mov_b32 s1, s3
+; GFX67-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX67-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x4
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX67-NEXT:    s_lshr_b32 s5, s1, 16
+; GFX67-NEXT:    s_lshr_b32 s6, s2, 16
+; GFX67-NEXT:    s_lshr_b32 s7, s3, 16
+; GFX67-NEXT:    s_add_i32 s5, s5, s7
+; GFX67-NEXT:    s_add_i32 s4, s4, s6
+; GFX67-NEXT:    s_add_i32 s1, s1, s3
+; GFX67-NEXT:    s_add_i32 s0, s0, s2
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v0, s0
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v1, s4
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v2, s1
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v3, s5
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: load_v4i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s3, 0
+; GFX8-NEXT:    s_mov_b32 s2, s1
+; GFX8-NEXT:    s_mov_b32 s1, s3
+; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x10
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX8-NEXT:    s_lshr_b32 s5, s1, 16
+; GFX8-NEXT:    s_add_i32 s5, s5, s4
+; GFX8-NEXT:    s_add_i32 s1, s1, s3
+; GFX8-NEXT:    s_lshr_b32 s3, s2, 16
+; GFX8-NEXT:    s_lshr_b32 s4, s0, 16
+; GFX8-NEXT:    s_add_i32 s4, s4, s3
+; GFX8-NEXT:    s_add_i32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s2, s4, 16
+; GFX8-NEXT:    s_or_b32 s0, s0, s2
+; GFX8-NEXT:    s_and_b32 s1, s1, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s2, s5, 16
+; GFX8-NEXT:    s_or_b32 s1, s1, s2
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: load_v4i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s1
+; GFX9-NEXT:    s_mov_b32 s3, 0
+; GFX9-NEXT:    s_mov_b32 s1, s3
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x10
+; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_pk_add_u16 v0, s6, v0
+; GFX9-NEXT:    v_pk_add_u16 v1, s7, v1
+; GFX9-NEXT:    ; return to shader part epilog
+  %gep1 = getelementptr inbounds <4 x i16>, ptr addrspace(6) %p1, i32 2
+  %r0 = load <4 x i16>, ptr addrspace(6) %p0
+  %r1 = load <4 x i16>, ptr addrspace(6) %gep1
+  %r = add <4 x i16> %r0, %r1
+  %r2 = bitcast <4 x i16> %r to <4 x half>
+  ret <4 x half> %r2
+}
+
+define amdgpu_vs <6 x half> @load_v6i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v6i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_mov_b32 s5, 0
+; GFX67-NEXT:    s_mov_b32 s4, s1
+; GFX67-NEXT:    s_mov_b32 s1, s5
+; GFX67-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX67-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x8
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_lshr_b32 s3, s0, 16
+; GFX67-NEXT:    s_lshr_b32 s7, s1, 16
+; GFX67-NEXT:    s_lshr_b32 s8, s2, 16
+; GFX67-NEXT:    s_lshr_b32 s9, s4, 16
+; GFX67-NEXT:    s_lshr_b32 s10, s5, 16
+; GFX67-NEXT:    s_lshr_b32 s11, s6, 16
+; GFX67-NEXT:    s_add_i32 s8, s8, s11
+; GFX67-NEXT:    s_add_i32 s7, s7, s10
+; GFX67-NEXT:    s_add_i32 s3, s3, s9
+; GFX67-NEXT:    s_add_i32 s2, s2, s6
+; GFX67-NEXT:    s_add_i32 s1, s1, s5
+; GFX67-NEXT:    s_add_i32 s0, s0, s4
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v0, s0
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v2, s1
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v4, s2
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v1, s3
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v3, s7
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v5, s8
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: load_v6i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s3, 0
+; GFX8-NEXT:    s_mov_b32 s2, s1
+; GFX8-NEXT:    s_mov_b32 s1, s3
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x20
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshr_b32 s3, s6, 16
+; GFX8-NEXT:    s_lshr_b32 s7, s2, 16
+; GFX8-NEXT:    s_add_i32 s7, s7, s3
+; GFX8-NEXT:    s_add_i32 s2, s2, s6
+; GFX8-NEXT:    s_lshr_b32 s3, s5, 16
+; GFX8-NEXT:    s_lshr_b32 s6, s1, 16
+; GFX8-NEXT:    s_add_i32 s6, s6, s3
+; GFX8-NEXT:    s_add_i32 s1, s1, s5
+; GFX8-NEXT:    s_lshr_b32 s3, s4, 16
+; GFX8-NEXT:    s_lshr_b32 s5, s0, 16
+; GFX8-NEXT:    s_add_i32 s5, s5, s3
+; GFX8-NEXT:    s_add_i32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s3, s5, 16
+; GFX8-NEXT:    s_or_b32 s0, s0, s3
+; GFX8-NEXT:    s_and_b32 s1, s1, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s3, s6, 16
+; GFX8-NEXT:    s_or_b32 s1, s1, s3
+; GFX8-NEXT:    s_and_b32 s2, s2, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s3, s7, 16
+; GFX8-NEXT:    s_or_b32 s2, s2, s3
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s2
+; GFX8-NEXT:    ; return to shader part epilog
+;
+; GFX9-LABEL: load_v6i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s1
+; GFX9-NEXT:    s_mov_b32 s3, 0
+; GFX9-NEXT:    s_mov_b32 s1, s3
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x20
+; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9-NEXT:    v_pk_add_u16 v0, s8, v0
+; GFX9-NEXT:    v_pk_add_u16 v1, s9, v1
+; GFX9-NEXT:    v_pk_add_u16 v2, s10, v2
+; GFX9-NEXT:    ; return to shader part epilog
+  %gep1 = getelementptr inbounds <6 x i16>, ptr addrspace(6) %p1, i32 2
+  %r0 = load <6 x i16>, ptr addrspace(6) %p0
+  %r1 = load <6 x i16>, ptr addrspace(6) %gep1
+  %r = add <6 x i16> %r0, %r1
+  %r2 = bitcast <6 x i16> %r to <6 x half>
+  ret <6 x half> %r2
+}
+
+define amdgpu_vs <8 x half> @load_v8i16(ptr addrspace(6) inreg %p0, ptr addrspace(6) inreg %p1) #0 {
+; GFX67-LABEL: load_v8i16:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_mov_b32 s5, 0
+; GFX67-NEXT:    s_mov_b32 s4, s1
+; GFX67-NEXT:    s_mov_b32 s1, s5
+; GFX67-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX67-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x8
+; GFX67-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX67-NEXT:    s_lshr_b32 s8, s0, 16
+; GFX67-NEXT:    s_lshr_b32 s9, s1, 16
+; GFX67-NEXT:    s_lshr_b32 s10, s2, 16
+; GFX67-NEXT:    s_lshr_b32 s11, s3, 16
+; GFX67-NEXT:    s_lshr_b32 s12, s4, 16
+; GFX67-NEXT:    s_lshr_b32 s13, s5, 16
+; GFX67-NEXT:    s_lshr_b32 s14, s6, 16
+; GFX67-NEXT:    s_lshr_b32 s15, s7, 16
+; GFX67-NEXT:    s_add_i32 s11, s11, s15
+; GFX67-NEXT:    s_add_i32 s10, s10, s14
+; GFX67-NEXT:    s_add_i32 s9, s9, s13
+; GFX67-NEXT:    s_add_i32 s8, s8, s12
+; GFX67-NEXT:    s_add_i32 s3, s3, s7
+; GFX67-NEXT:    s_add_i32 s2, s2, s6
+; GFX67-NEXT:    s_add_i32 s1, s1, s5
+; GFX67-NEXT:    s_add_i32 s0, s0, s4
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v0, s0
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v2, s1
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v4, s2
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v6, s3
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v1, s8
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v3, s9
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v5, s10
+; GFX67-NEXT:    v_cvt_f32_f16_e32 v7, s11
+; GFX67-NEXT:    ; return to shader part epilog
+;
+; GFX8-LABEL: load_v8i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_mov_b32 s3, 0
+; GFX8-NEXT:    s_mov_b32 s2, s1
+; GFX8-NEXT:    s_mov_b32 s1, s3
+; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x20
+; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    s_lshr_b32 s8, s7, 16
+; GFX8-NEXT:    s_lshr_b32 s9, s3, 16
+; GFX8-NEXT:    s_add_i32 s9, s9, s8
+; GFX8-NEXT:    s_add_i32 s3, s3, s7
+; GFX8-NEXT:    s_lshr_b32 s7, s6, 16
+; GFX8-NEXT:    s_lshr_b32 s8, s2, 16
+; GFX8-NEXT:    s_add_i32 s8, s8, s7
+; GFX8-NEXT:    s_add_i32 s2, s2, s6
+; GFX8-NEXT:    s_lshr_b32 s6, s5, 16
+; GFX8-NEXT:    s_lshr_b32 s7, s1, 16
+; GFX8-NEXT:    s_add_i32 s7, s7, s6
+; GFX8-NEXT:    s_add_i32 s1, s1, s5
+; GFX8-NEXT:    s_lshr_b32 s5, s4, 16
+; GFX8-NEXT:    s_lshr_b32 s6, s0, 16
+; GFX8-NEXT:    s_add_i32 s6, s6, s5
+; GFX8-NEXT:    s_add_i32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
+; GFX8-NEXT:    s_lshl_b32 s4, s6, 16
+; GFX8-NEXT:    s_or_b32 s0, s0, s4
+; GFX8-NEXT:    s_and_b32 s1, s1, 0xffff
+; GFX8-N...
[truncated]

arsenm marked this pull request as ready for review November 21, 2025 00:10
Base automatically changed from users/arsenm/amdgpu/convert-constant-address-space-32-bit-test-generated-checks to main November 21, 2025 00:45
github-actions (bot) commented

🐧 Linux x64 Test Results

  • 186427 tests passed
  • 4868 tests skipped
