diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernArgProlog.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernArgProlog.cpp
index 40094518dce0a..90c4f4e6680c2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernArgProlog.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernArgProlog.cpp
@@ -109,7 +109,7 @@ AMDGPUPreloadKernArgProlog::AMDGPUPreloadKernArgProlog(MachineFunction &MF)
       TRI(*ST.getRegisterInfo()) {}
 
 bool AMDGPUPreloadKernArgProlog::run() {
-  if (!ST.hasKernargPreload())
+  if (!ST.needsKernArgPreloadProlog())
     return false;
 
   unsigned NumKernArgPreloadSGPRs = MFI.getNumKernargPreloadedSGPRs();
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index f47ddf5d93ec3..3d6bfbea7727e 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1573,6 +1573,12 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
   // extended VA to 57 bits.
   bool hasGetPCZeroExtension() const { return GFX12Insts && !GFX1250Insts; }
 
+  // \returns true if the target needs to create a prolog for backward
+  // compatibility when preloading kernel arguments.
+  bool needsKernArgPreloadProlog() const {
+    return hasKernargPreload() && !GFX1250Insts;
+  }
+
   /// \returns SGPR allocation granularity supported by the subtarget.
   unsigned getSGPRAllocGranule() const {
     return AMDGPU::IsaInfo::getSGPRAllocGranule(this);
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll
index 3a5507063b834..57967bc1650fe 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sat.pk.ll
@@ -16,7 +16,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_v(i32 %src, ptr %out) #1 {
 ; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0
 ; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0
 ; SDAG-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s2
-; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE
 ; SDAG-REAL16-NEXT: s_endpgm
 ;
 ; SDAG-FAKE16-LABEL: sat_pk4_i4_i8_f32_v:
@@ -27,7 +27,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_v(i32 %src, ptr %out) #1 {
 ; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0
 ; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
 ; SDAG-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v1, s2
-; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1]
+; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] scope:SCOPE_SE
 ; SDAG-FAKE16-NEXT: s_endpgm
 ;
 ; GISEL-REAL16-LABEL: sat_pk4_i4_i8_f32_v:
@@ -38,7 +38,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_v(i32 %src, ptr %out) #1 {
 ; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0
 ; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0
 ; GISEL-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s2
-; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE
 ; GISEL-REAL16-NEXT: s_endpgm
 ;
 ; GISEL-FAKE16-LABEL: sat_pk4_i4_i8_f32_v:
@@ -49,7 +49,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_v(i32 %src, ptr %out) #1 {
 ; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0
 ; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
 ; GISEL-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v0, s2
-; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1]
+; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE
 ; GISEL-FAKE16-NEXT: s_endpgm
   %cvt = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 %src) #0
   store i16 %cvt, ptr %out, align 2
@@ -58,33 +58,21 @@
 
 define amdgpu_kernel void @sat_pk4_i4_i8_f32_s(i32 inreg %src, ptr %out) #1 {
 ; SDAG-REAL16-LABEL: sat_pk4_i4_i8_f32_s:
-; SDAG-REAL16: ; %bb.1: -; SDAG-REAL16-NEXT: s_load_b32 s8, s[4:5], 0x0 -; SDAG-REAL16-NEXT: s_waitcnt lgkmcnt(0) -; SDAG-REAL16-NEXT: s_branch .LBB1_0 -; SDAG-REAL16-NEXT: .p2align 8 -; SDAG-REAL16-NEXT: ; %bb.2: -; SDAG-REAL16-NEXT: .LBB1_0: +; SDAG-REAL16: ; %bb.0: ; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 ; SDAG-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s8 ; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0 -; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; SDAG-REAL16-NEXT: s_endpgm ; ; SDAG-FAKE16-LABEL: sat_pk4_i4_i8_f32_s: -; SDAG-FAKE16: ; %bb.1: -; SDAG-FAKE16-NEXT: s_load_b32 s8, s[4:5], 0x0 -; SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; SDAG-FAKE16-NEXT: s_branch .LBB1_0 -; SDAG-FAKE16-NEXT: .p2align 8 -; SDAG-FAKE16-NEXT: ; %bb.2: -; SDAG-FAKE16-NEXT: .LBB1_0: +; SDAG-FAKE16: ; %bb.0: ; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 ; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0 ; SDAG-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v1, s8 ; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0 -; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] +; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] scope:SCOPE_SE ; SDAG-FAKE16-NEXT: s_endpgm ; ; GISEL-REAL16-LABEL: sat_pk4_i4_i8_f32_s: @@ -95,7 +83,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_s(i32 inreg %src, ptr %out) #1 { ; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0 ; GISEL-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, s2 -; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-REAL16-NEXT: s_endpgm ; ; GISEL-FAKE16-LABEL: sat_pk4_i4_i8_f32_s: @@ -106,7 +94,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_s(i32 inreg %src, ptr %out) #1 { ; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GISEL-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v0, s2 -; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-FAKE16-NEXT: s_endpgm %cvt = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 %src) #0 store i16 %cvt, ptr %out, align 2 @@ -120,7 +108,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_i(ptr %out) #1 { ; SDAG-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, 0x64 ; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0 -; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; SDAG-REAL16-NEXT: s_endpgm ; ; SDAG-FAKE16-LABEL: sat_pk4_i4_i8_f32_i: @@ -129,7 +117,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_i(ptr %out) #1 { ; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0 ; SDAG-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v1, 0x64 ; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0 -; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] +; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] scope:SCOPE_SE ; SDAG-FAKE16-NEXT: s_endpgm ; ; GISEL-REAL16-LABEL: sat_pk4_i4_i8_f32_i: @@ -138,7 +126,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_i(ptr %out) #1 { ; GISEL-REAL16-NEXT: v_sat_pk4_i4_i8_e32 v0.l, 0x64 ; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0 -; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-REAL16-NEXT: s_endpgm ; ; GISEL-FAKE16-LABEL: sat_pk4_i4_i8_f32_i: @@ -147,7 +135,7 @@ define amdgpu_kernel void @sat_pk4_i4_i8_f32_i(ptr %out) #1 { ; GISEL-FAKE16-NEXT: v_sat_pk4_i4_i8_e32 v0, 0x64 ; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-FAKE16-NEXT: 
s_wait_kmcnt 0x0 -; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-FAKE16-NEXT: s_endpgm %cvt = call i16 @llvm.amdgcn.sat.pk4.i4.i8(i32 100) #0 store i16 %cvt, ptr %out, align 2 @@ -163,7 +151,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_v(i32 %src, ptr %out) #1 { ; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0 ; SDAG-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s2 -; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; SDAG-REAL16-NEXT: s_endpgm ; ; SDAG-FAKE16-LABEL: sat_pk4_u4_u8_f32_v: @@ -174,7 +162,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_v(i32 %src, ptr %out) #1 { ; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0 ; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0 ; SDAG-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v1, s2 -; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] +; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] scope:SCOPE_SE ; SDAG-FAKE16-NEXT: s_endpgm ; ; GISEL-REAL16-LABEL: sat_pk4_u4_u8_f32_v: @@ -185,7 +173,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_v(i32 %src, ptr %out) #1 { ; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0 ; GISEL-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s2 -; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-REAL16-NEXT: s_endpgm ; ; GISEL-FAKE16-LABEL: sat_pk4_u4_u8_f32_v: @@ -196,7 +184,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_v(i32 %src, ptr %out) #1 { ; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GISEL-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v0, s2 -; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-FAKE16-NEXT: s_endpgm %cvt = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 %src) #0 store i16 %cvt, ptr %out, align 2 @@ -205,33 +193,21 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_v(i32 %src, ptr %out) #1 { define amdgpu_kernel void @sat_pk4_u4_u8_f32_s(i32 inreg %src, ptr %out) #1 { ; SDAG-REAL16-LABEL: sat_pk4_u4_u8_f32_s: -; SDAG-REAL16: ; %bb.1: -; SDAG-REAL16-NEXT: s_load_b32 s8, s[4:5], 0x0 -; SDAG-REAL16-NEXT: s_waitcnt lgkmcnt(0) -; SDAG-REAL16-NEXT: s_branch .LBB4_0 -; SDAG-REAL16-NEXT: .p2align 8 -; SDAG-REAL16-NEXT: ; %bb.2: -; SDAG-REAL16-NEXT: .LBB4_0: +; SDAG-REAL16: ; %bb.0: ; SDAG-REAL16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 ; SDAG-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s8 ; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0 -; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; SDAG-REAL16-NEXT: s_endpgm ; ; SDAG-FAKE16-LABEL: sat_pk4_u4_u8_f32_s: -; SDAG-FAKE16: ; %bb.1: -; SDAG-FAKE16-NEXT: s_load_b32 s8, s[4:5], 0x0 -; SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; SDAG-FAKE16-NEXT: s_branch .LBB4_0 -; SDAG-FAKE16-NEXT: .p2align 8 -; SDAG-FAKE16-NEXT: ; %bb.2: -; SDAG-FAKE16-NEXT: .LBB4_0: +; SDAG-FAKE16: ; %bb.0: ; SDAG-FAKE16-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 ; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0 ; SDAG-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v1, s8 ; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0 -; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] +; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] scope:SCOPE_SE ; SDAG-FAKE16-NEXT: s_endpgm ; ; GISEL-REAL16-LABEL: sat_pk4_u4_u8_f32_s: @@ -242,7 +218,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_s(i32 inreg %src, ptr %out) #1 { ; GISEL-REAL16-NEXT: 
v_mov_b32_e32 v1, 0 ; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0 ; GISEL-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, s2 -; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-REAL16-NEXT: s_endpgm ; ; GISEL-FAKE16-LABEL: sat_pk4_u4_u8_f32_s: @@ -253,7 +229,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_s(i32 inreg %src, ptr %out) #1 { ; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GISEL-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v0, s2 -; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-FAKE16-NEXT: s_endpgm %cvt = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 %src) #0 store i16 %cvt, ptr %out, align 2 @@ -267,7 +243,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_i(ptr %out) #1 { ; SDAG-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, 0x64 ; SDAG-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; SDAG-REAL16-NEXT: s_wait_kmcnt 0x0 -; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; SDAG-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; SDAG-REAL16-NEXT: s_endpgm ; ; SDAG-FAKE16-LABEL: sat_pk4_u4_u8_f32_i: @@ -276,7 +252,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_i(ptr %out) #1 { ; SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, 0 ; SDAG-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v1, 0x64 ; SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0 -; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] +; SDAG-FAKE16-NEXT: flat_store_b16 v0, v1, s[0:1] scope:SCOPE_SE ; SDAG-FAKE16-NEXT: s_endpgm ; ; GISEL-REAL16-LABEL: sat_pk4_u4_u8_f32_i: @@ -285,7 +261,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_i(ptr %out) #1 { ; GISEL-REAL16-NEXT: v_sat_pk4_u4_u8_e32 v0.l, 0x64 ; GISEL-REAL16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-REAL16-NEXT: s_wait_kmcnt 0x0 -; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-REAL16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-REAL16-NEXT: s_endpgm ; ; GISEL-FAKE16-LABEL: sat_pk4_u4_u8_f32_i: @@ -294,7 +270,7 @@ define amdgpu_kernel void @sat_pk4_u4_u8_f32_i(ptr %out) #1 { ; GISEL-FAKE16-NEXT: v_sat_pk4_u4_u8_e32 v0, 0x64 ; GISEL-FAKE16-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] +; GISEL-FAKE16-NEXT: flat_store_b16 v1, v0, s[0:1] scope:SCOPE_SE ; GISEL-FAKE16-NEXT: s_endpgm %cvt = call i16 @llvm.amdgcn.sat.pk4.u4.u8(i32 100) #0 store i16 %cvt, ptr %out, align 2 diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll index 79b531e3ce785..c87f723086a41 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX942 %s ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90a %s +; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s define amdgpu_kernel void @preload_block_count_x(ptr addrspace(1) inreg %out) #0 { ; GFX942-LABEL: preload_block_count_x: @@ -30,6 +31,12 @@ define amdgpu_kernel void @preload_block_count_x(ptr addrspace(1) inreg %out) #0 ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 
s4 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr store i32 %load, ptr addrspace(1) %out @@ -65,6 +72,12 @@ define amdgpu_kernel void @preload_unused_arg_block_count_x(ptr addrspace(1) inr ; GFX90a-NEXT: v_mov_b32_e32 v1, s12 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_unused_arg_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s6 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr store i32 %load, ptr addrspace(1) %out @@ -101,6 +114,14 @@ define amdgpu_kernel void @no_free_sgprs_block_count_x(ptr addrspace(1) inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[14:15] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: no_free_sgprs_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x28 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[8:9] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr store i32 %load, ptr addrspace(1) %out @@ -127,6 +148,14 @@ define amdgpu_kernel void @no_inreg_block_count_x(ptr addrspace(1) %out) #0 { ; GFX90a-NEXT: v_mov_b32_e32 v1, s2 ; GFX90a-NEXT: global_store_dword v0, v1, s[0:1] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: no_inreg_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr store i32 %load, ptr addrspace(1) %out @@ -156,6 +185,16 @@ define amdgpu_kernel void @mixed_inreg_block_count_x(ptr addrspace(1) %out, i32 ; GFX90a-NEXT: v_mov_b32_e32 v1, s2 ; GFX90a-NEXT: global_store_dword v0, v1, s[0:1] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: mixed_inreg_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x10 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr store i32 %load, ptr addrspace(1) %out @@ -192,6 +231,15 @@ define amdgpu_kernel void @incorrect_type_i64_block_count_x(ptr addrspace(1) inr ; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] ; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: incorrect_type_i64_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i64, ptr addrspace(4) %imp_arg_ptr store i64 %load, ptr 
addrspace(1) %out @@ -228,6 +276,14 @@ define amdgpu_kernel void @incorrect_type_i16_block_count_x(ptr addrspace(1) inr ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_short v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: incorrect_type_i16_block_count_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] offset:8 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i16, ptr addrspace(4) %imp_arg_ptr store i16 %load, ptr addrspace(1) %out @@ -261,6 +317,12 @@ define amdgpu_kernel void @preload_block_count_y(ptr addrspace(1) inreg %out) #0 ; GFX90a-NEXT: v_mov_b32_e32 v1, s11 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_count_y: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 4 %load = load i32, ptr addrspace(4) %gep @@ -300,6 +362,14 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: random_incorrect_offset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b32 s0, s[0:1], 0xa +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2 %load = load i32, ptr addrspace(4) %gep @@ -336,6 +406,12 @@ define amdgpu_kernel void @preload_block_count_z(ptr addrspace(1) inreg %out) #0 ; GFX90a-NEXT: v_mov_b32_e32 v1, s12 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_count_z: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s6 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 8 %load = load i32, ptr addrspace(4) %gep @@ -376,6 +452,15 @@ define amdgpu_kernel void @preload_block_count_x_imparg_align_ptr_i8(ptr addrspa ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_count_x_imparg_align_ptr_i8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s4, 0xff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: s_add_co_i32 s0, s6, s0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr %ext = zext i8 %val to i32 @@ -417,6 +502,13 @@ define amdgpu_kernel void @preload_block_count_xyz(ptr addrspace(1) inreg %out) ; GFX90a-NEXT: v_mov_b32_e32 v2, s12 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_count_xyz: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: 
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s4 +; GFX1250-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep_x = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 0 %load_x = load i32, ptr addrspace(4) %gep_x @@ -461,6 +553,14 @@ define amdgpu_kernel void @preload_workgroup_size_x(ptr addrspace(1) inreg %out) ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_workgroup_size_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s7, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 12 %load = load i16, ptr addrspace(4) %gep @@ -499,6 +599,14 @@ define amdgpu_kernel void @preload_workgroup_size_y(ptr addrspace(1) inreg %out) ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_workgroup_size_y: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s7, 16 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 14 %load = load i16, ptr addrspace(4) %gep @@ -539,6 +647,14 @@ define amdgpu_kernel void @preload_workgroup_size_z(ptr addrspace(1) inreg %out) ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_workgroup_size_z: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s8, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 16 %load = load i16, ptr addrspace(4) %gep @@ -587,6 +703,16 @@ define amdgpu_kernel void @preload_workgroup_size_xyz(ptr addrspace(1) inreg %ou ; GFX90a-NEXT: v_mov_b32_e32 v2, s2 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_workgroup_size_xyz: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s7, 16 +; GFX1250-NEXT: s_and_b32 s1, s7, 0xffff +; GFX1250-NEXT: s_and_b32 s4, s8, 0xffff +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s1 +; GFX1250-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s4 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep_x = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 12 %load_x = load i16, ptr addrspace(4) %gep_x @@ -636,6 +762,14 @@ define amdgpu_kernel void @preload_remainder_x(ptr addrspace(1) inreg %out) #0 { ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_remainder_x: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s8, 16 +; GFX1250-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 18 %load = load i16, ptr addrspace(4) %gep @@ -674,6 +808,14 @@ define amdgpu_kernel void @preloadremainder_y(ptr addrspace(1) inreg %out) #0 { ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preloadremainder_y: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s9, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 20 %load = load i16, ptr addrspace(4) %gep @@ -712,6 +854,14 @@ define amdgpu_kernel void @preloadremainder_z(ptr addrspace(1) inreg %out) #0 { ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preloadremainder_z: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s9, 16 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 22 %load = load i16, ptr addrspace(4) %gep @@ -758,6 +908,16 @@ define amdgpu_kernel void @preloadremainder_xyz(ptr addrspace(1) inreg %out) #0 ; GFX90a-NEXT: v_mov_b32_e32 v2, s0 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preloadremainder_xyz: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s9, 16 +; GFX1250-NEXT: s_lshr_b32 s1, s8, 16 +; GFX1250-NEXT: s_and_b32 s4, s9, 0xffff +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s1 +; GFX1250-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep_x = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 18 %load_x = load i16, ptr addrspace(4) %gep_x @@ -805,6 +965,14 @@ define amdgpu_kernel void @no_free_sgprs_preloadremainder_z(ptr addrspace(1) inr ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[14:15] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: no_free_sgprs_preloadremainder_z: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s15, 16 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[8:9] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 22 %load = load i16, ptr addrspace(4) %gep @@ -845,6 +1013,12 @@ define amdgpu_kernel void @preload_block_max_user_sgprs(ptr addrspace(1) inreg % ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_max_user_sgprs: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s12 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; 
GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr store i32 %load, ptr addrspace(1) %out @@ -887,6 +1061,15 @@ define amdgpu_kernel void @preload_block_count_z_workgroup_size_z_remainder_z(pt ; GFX90a-NEXT: v_mov_b32_e32 v2, s0 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: preload_block_count_z_workgroup_size_z_remainder_z: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s9, 16 +; GFX1250-NEXT: s_and_b32 s1, s8, 0xffff +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s6 +; GFX1250-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep0 = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 8 %gep1 = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 16 diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll index efe4cfa7e5d2b..d5edfb42fa6d1 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX942 %s - ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX90a %s +; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) inreg %out, i8 inreg %arg0) #0 { ; GFX942-LABEL: ptr1_i8: @@ -33,6 +33,14 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) inreg %out, i8 inreg %arg0) ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_i8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s4, 0xff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %ext = zext i8 %arg0 to i32 store i32 %ext, ptr addrspace(1) %out ret void @@ -68,6 +76,14 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) inreg %out, i8 zero ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_i8_zext_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s4, 0xff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %ext = zext i8 %arg0 to i32 store i32 %ext, ptr addrspace(1) %out, align 4 ret void @@ -103,6 +119,14 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) inreg %out, i16 ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_i16_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s4, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %ext = zext i16 %arg0 to i32 store i32 %ext, ptr addrspace(1) %out, align 4 ret void @@ -136,6 +160,12 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) inreg 
%out, i32 ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_i32_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm store i32 %arg0, ptr addrspace(1) %out ret void } @@ -172,6 +202,14 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 inreg %arg0, ptr addrspa ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[10:11] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i32_ptr1_i32_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_add_co_i32 s0, s2, s6 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[4:5] +; GFX1250-NEXT: s_endpgm %add = add i32 %arg0, %arg1 store i32 %add, ptr addrspace(1) %out ret void @@ -211,6 +249,16 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) inreg %out, ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_i16_i16_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_lshr_b32 s0, s4, 16 +; GFX1250-NEXT: s_and_b32 s1, s4, 0xffff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: s_add_co_i32 s0, s1, s0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %ext = zext i16 %arg0 to i32 %ext1 = zext i16 %arg1 to i32 %add = add i32 %ext, %ext1 @@ -246,6 +294,12 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) inreg %out, <2 ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_short v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_v2i8_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm store <2 x i8> %in, ptr addrspace(1) %out ret void } @@ -289,6 +343,18 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) inreg %out, ptr ad ; GFX90a-NEXT: global_store_dword v0, v2, s[8:9] ; GFX90a-NEXT: s_waitcnt vmcnt(0) ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: byref_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: v_mov_b32_e32 v2, s1 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: s_endpgm %in = load i32, ptr addrspace(4) %in.byref store volatile i32 %in, ptr addrspace(1) %out, align 4 store volatile i32 %after.offset, ptr addrspace(1) %out, align 4 @@ -335,6 +401,18 @@ define amdgpu_kernel void @byref_staggered_preload_arg(ptr addrspace(1) inreg %o ; GFX90a-NEXT: global_store_dword v0, v2, s[8:9] ; GFX90a-NEXT: s_waitcnt vmcnt(0) ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: byref_staggered_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: v_mov_b32_e32 v2, s1 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 
0x0 +; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: s_endpgm %in = load i32, ptr addrspace(4) %in.byref store volatile i32 %in, ptr addrspace(1) %out, align 4 store volatile i32 %after.offset, ptr addrspace(1) %out, align 4 @@ -390,6 +468,20 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture inreg %out, <8 x ; GFX90a-NEXT: v_mov_b32_e32 v3, s15 ; GFX90a-NEXT: global_store_dwordx4 v4, v[0:3], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v8i32_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b256 s[4:11], s[0:1], 0x20 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v0, s8 +; GFX1250-NEXT: v_dual_mov_b32 v1, s9 :: v_dual_mov_b32 v2, s10 +; GFX1250-NEXT: v_dual_mov_b32 v3, s11 :: v_dual_mov_b32 v4, s4 +; GFX1250-NEXT: v_dual_mov_b32 v5, s5 :: v_dual_mov_b32 v6, s6 +; GFX1250-NEXT: v_mov_b32_e32 v7, s7 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b128 v8, v[0:3], s[2:3] offset:16 +; GFX1250-NEXT: global_store_b128 v8, v[4:7], s[2:3] +; GFX1250-NEXT: s_endpgm store <8 x i32> %in, ptr addrspace(1) %out, align 4 ret void } @@ -425,6 +517,15 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v3i16_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5 +; GFX1250-NEXT: v_mov_b32_e32 v2, s4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] offset:4 +; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] +; GFX1250-NEXT: s_endpgm store <3 x i16> %in, ptr addrspace(1) %out, align 4 ret void } @@ -461,6 +562,13 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v3, 0 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v3i32_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7 +; GFX1250-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v3, 0 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm store <3 x i32> %in, ptr addrspace(1) %out, align 4 ret void } @@ -497,6 +605,13 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v2, s14 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v3f32_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s6 +; GFX1250-NEXT: v_dual_mov_b32 v1, s7 :: v_dual_mov_b32 v2, s8 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm store <3 x float> %in, ptr addrspace(1) %out, align 4 ret void } @@ -546,6 +661,19 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture inreg %ou ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v5i8_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_pack_lh_b32_b16 s0, 0, s4 +; GFX1250-NEXT: s_and_b32 s1, s4, 0xffff +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5 +; GFX1250-NEXT: s_or_b32 s0, s1, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b8 v0, v1, s[2:3] offset:4 +; GFX1250-NEXT: global_store_b32 v0, v2, 
s[2:3] +; GFX1250-NEXT: s_endpgm store <5 x i8> %in, ptr addrspace(1) %out, align 4 ret void } @@ -604,6 +732,24 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture inreg %out, <5 x ; GFX90a-NEXT: v_mov_b32_e32 v3, s15 ; GFX90a-NEXT: global_store_dwordx4 v4, v[0:3], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v5f64_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: s_load_b64 s[12:13], s[0:1], 0x60 +; GFX1250-NEXT: s_load_b256 s[4:11], s[0:1], 0x40 +; GFX1250-NEXT: v_mov_b32_e32 v10, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b64_e32 v[8:9], s[12:13] +; GFX1250-NEXT: v_dual_mov_b32 v0, s8 :: v_dual_mov_b32 v1, s9 +; GFX1250-NEXT: v_dual_mov_b32 v2, s10 :: v_dual_mov_b32 v3, s11 +; GFX1250-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 +; GFX1250-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GFX1250-NEXT: s_clause 0x2 +; GFX1250-NEXT: global_store_b64 v10, v[8:9], s[2:3] offset:32 +; GFX1250-NEXT: global_store_b128 v10, v[0:3], s[2:3] offset:16 +; GFX1250-NEXT: global_store_b128 v10, v[4:7], s[2:3] +; GFX1250-NEXT: s_endpgm store <5 x double> %in, ptr addrspace(1) %out, align 8 ret void } @@ -665,6 +811,20 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) inreg %out, <8 x i8 ; GFX90a-NEXT: v_mov_b32_e32 v2, 0 ; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v8i8_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_pack_lh_b32_b16 s0, 0, s5 +; GFX1250-NEXT: s_pack_lh_b32_b16 s1, 0, s4 +; GFX1250-NEXT: s_and_b32 s4, s4, 0xffff +; GFX1250-NEXT: s_and_b32 s5, s5, 0xffff +; GFX1250-NEXT: s_or_b32 s1, s4, s1 +; GFX1250-NEXT: s_or_b32 s0, s5, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, s1 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[2:3] +; GFX1250-NEXT: s_endpgm store <8 x i8> %in, ptr addrspace(1) %out ret void } @@ -696,6 +856,13 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) inreg %out, i ; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[10:11], s[10:11] op_sel:[0,1] ; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i64_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5] +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[2:3] +; GFX1250-NEXT: s_endpgm store i64 %a, ptr addrspace(1) %out, align 8 ret void } @@ -727,6 +894,13 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) inreg %out, d ; GFX90a-NEXT: v_pk_mov_b32 v[0:1], s[10:11], s[10:11] op_sel:[0,1] ; GFX90a-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: f64_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5] +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[2:3] +; GFX1250-NEXT: s_endpgm store double %in, ptr addrspace(1) %out ret void } @@ -759,6 +933,12 @@ define amdgpu_kernel void @half_kernel_preload_arg(ptr addrspace(1) inreg %out, ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_short v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: half_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm store half %in, ptr addrspace(1) %out ret void } @@ -791,6 +971,12 @@ define 
amdgpu_kernel void @bfloat_kernel_preload_arg(ptr addrspace(1) inreg %out ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_short v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: bfloat_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm store bfloat %in, ptr addrspace(1) %out ret void } @@ -823,6 +1009,12 @@ define amdgpu_kernel void @v2bfloat_kernel_preload_arg(ptr addrspace(1) inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v2bfloat_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm store <2 x bfloat> %in, ptr addrspace(1) %out ret void } @@ -858,6 +1050,15 @@ define amdgpu_kernel void @v3bfloat_kernel_preload_arg(ptr addrspace(1) inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v1, s10 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v3bfloat_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5 +; GFX1250-NEXT: v_mov_b32_e32 v2, s4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] offset:4 +; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] +; GFX1250-NEXT: s_endpgm store <3 x bfloat> %in, ptr addrspace(1) %out ret void } @@ -894,6 +1095,13 @@ define amdgpu_kernel void @v6bfloat_kernel_preload_arg(ptr addrspace(1) inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v3, 0 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v6bfloat_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7 +; GFX1250-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v3, 0 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm store <6 x bfloat> %in, ptr addrspace(1) %out ret void } @@ -939,6 +1147,17 @@ define amdgpu_kernel void @half_v7bfloat_kernel_preload_arg(ptr addrspace(1) inr ; GFX90a-NEXT: v_mov_b32_e32 v1, s13 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: half_v7bfloat_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, s4 +; GFX1250-NEXT: v_dual_mov_b32 v5, s9 :: v_dual_mov_b32 v2, s8 +; GFX1250-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7 +; GFX1250-NEXT: s_clause 0x2 +; GFX1250-NEXT: global_store_b16 v3, v4, s[2:3] +; GFX1250-NEXT: global_store_b16 v3, v5, s[10:11] offset:12 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[10:11] +; GFX1250-NEXT: s_endpgm store half %in, ptr addrspace(1) %out store <7 x bfloat> %in2, ptr addrspace(1) %out2 ret void @@ -974,6 +1193,14 @@ define amdgpu_kernel void @i1_kernel_preload_arg(ptr addrspace(1) inreg %out, i1 ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_byte v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i1_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s4, 1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b8 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm store i1 %in, ptr addrspace(1) %out ret void } @@ -1012,6 +1239,14 @@ define amdgpu_kernel void @fp128_kernel_preload_arg(ptr addrspace(1) inreg %out, ; GFX90a-NEXT: 
v_mov_b32_e32 v3, s15 ; GFX90a-NEXT: global_store_dwordx4 v4, v[0:3], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: fp128_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v0, s6 +; GFX1250-NEXT: v_dual_mov_b32 v1, s7 :: v_dual_mov_b32 v2, s8 +; GFX1250-NEXT: v_mov_b32_e32 v3, s9 +; GFX1250-NEXT: global_store_b128 v4, v[0:3], s[2:3] +; GFX1250-NEXT: s_endpgm store fp128 %in, ptr addrspace(1) %out ret void } @@ -1063,6 +1298,20 @@ define amdgpu_kernel void @v7i8_kernel_preload_arg(ptr addrspace(1) inreg %out, ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v7i8_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_pack_lh_b32_b16 s0, 0, s4 +; GFX1250-NEXT: s_and_b32 s1, s4, 0xffff +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5 +; GFX1250-NEXT: s_or_b32 s0, s1, s0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-NEXT: s_clause 0x2 +; GFX1250-NEXT: global_store_d16_hi_b8 v0, v1, s[2:3] offset:6 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] offset:4 +; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] +; GFX1250-NEXT: s_endpgm store <7 x i8> %in, ptr addrspace(1) %out ret void } @@ -1103,6 +1352,16 @@ define amdgpu_kernel void @v7half_kernel_preload_arg(ptr addrspace(1) inreg %out ; GFX90a-NEXT: v_mov_b32_e32 v1, s13 ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: v7half_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, s9 +; GFX1250-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v0, s6 +; GFX1250-NEXT: v_mov_b32_e32 v1, s7 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v3, v4, s[2:3] offset:12 +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[2:3] +; GFX1250-NEXT: s_endpgm store <7 x half> %in, ptr addrspace(1) %out ret void } @@ -1139,6 +1398,15 @@ define amdgpu_kernel void @i16_i32_kernel_preload_arg(ptr addrspace(1) inreg %ou ; GFX90a-NEXT: v_mov_b32_e32 v1, s11 ; GFX90a-NEXT: global_store_dword v0, v1, s[12:13] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i16_i32_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: v_mov_b32_e32 v2, s5 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: global_store_b32 v0, v2, s[6:7] +; GFX1250-NEXT: s_endpgm store i16 %in, ptr addrspace(1) %out store i32 %in2, ptr addrspace(1) %out2 ret void @@ -1181,6 +1449,16 @@ define amdgpu_kernel void @i16_v3i32_kernel_preload_arg(ptr addrspace(1) inreg % ; GFX90a-NEXT: s_waitcnt lgkmcnt(0) ; GFX90a-NEXT: global_store_dwordx3 v3, v[0:2], s[0:1] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i16_v3i32_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, s4 +; GFX1250-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7 +; GFX1250-NEXT: v_mov_b32_e32 v2, s8 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v3, v4, s[2:3] +; GFX1250-NEXT: global_store_b96 v3, v[0:2], s[10:11] +; GFX1250-NEXT: s_endpgm store i16 %in, ptr addrspace(1) %out store <3 x i32> %in2, ptr addrspace(1) %out2 ret void @@ -1216,6 +1494,14 @@ define amdgpu_kernel void @i16_i16_kernel_preload_arg(ptr addrspace(1) inreg %ou ; GFX90a-NEXT: global_store_short v0, v1, s[8:9] ; GFX90a-NEXT: global_store_short_d16_hi v0, v1, s[12:13] ; GFX90a-NEXT: s_endpgm +; +; 
GFX1250-LABEL: i16_i16_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: global_store_d16_hi_b16 v0, v1, s[6:7] +; GFX1250-NEXT: s_endpgm store i16 %in, ptr addrspace(1) %out store i16 %in2, ptr addrspace(1) %out2 ret void @@ -1261,6 +1547,14 @@ define amdgpu_kernel void @i16_v2i8_kernel_preload_arg(ptr addrspace(1) inreg %o ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_short v0, v1, s[12:13] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i16_v2i8_kernel_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: global_store_b16 v0, v1, s[2:3] +; GFX1250-NEXT: global_store_d16_hi_b16 v0, v1, s[6:7] +; GFX1250-NEXT: s_endpgm store i16 %in, ptr addrspace(1) %out store <2 x i8> %in2, ptr addrspace(1) %out2 ret void @@ -1302,6 +1596,16 @@ define amdgpu_kernel void @i32_ptr1_i32_staggered_preload_arg(i32 inreg %arg0, p ; GFX90a-NEXT: v_mov_b32_e32 v1, s2 ; GFX90a-NEXT: global_store_dword v0, v1, s[0:1] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: i32_ptr1_i32_staggered_preload_arg: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b96 s[4:6], s[0:1], 0x8 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_add_co_i32 s0, s2, s6 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[4:5] +; GFX1250-NEXT: s_endpgm %add = add i32 %arg0, %arg1 store i32 %add, ptr addrspace(1) %out ret void @@ -1336,6 +1640,14 @@ define amdgpu_kernel void @ptr1_i8_trailing_unused(ptr addrspace(1) inreg %out, ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 ; GFX90a-NEXT: global_store_dword v0, v1, s[8:9] ; GFX90a-NEXT: s_endpgm +; +; GFX1250-LABEL: ptr1_i8_trailing_unused: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_and_b32 s0, s4, 0xff +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] +; GFX1250-NEXT: s_endpgm %ext = zext i8 %arg0 to i32 store i32 %ext, ptr addrspace(1) %out ret void