From cfc0b3d8bce274d9e8729f098abed0583a1a578d Mon Sep 17 00:00:00 2001 From: Prasoon Mishra Date: Fri, 26 Sep 2025 11:14:07 +0000 Subject: [PATCH] [AMDGPU] Sink uniform buffer address offsets into soffset This patch implements an optimization to partition MUBUF load/store offsets into vector and scalar components for better address coalescing and reduced VGPR pressure. Transform buffer operations where voffset = add(uniform, divergent) by moving the uniform part to soffset and keeping the divergent part in voffset: Before: v_add_u32 v1, v0, sN buffer_{load,store}_T v*, v1, s[bufDesc:bufDesc+3] offen After: buffer_{load,store}_T v*, v0, s[bufDesc:bufDesc+3], sN offen The optimization currently applies to raw buffer loads/stores in OFFEN addressing mode when soffset is initially zero. Tests include comprehensive validation of both buffer loads and stores across the supported variants (i8, i16, i32, vectors, floats), with positive and negative test cases. --- llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 88 +++- llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h | 1 + .../AMDGPU/buffer-offset-to-soffset-loads.ll | 398 +++++++++++++++++ .../AMDGPU/buffer-offset-to-soffset-stores.ll | 399 ++++++++++++++++++ .../AMDGPU/llvm.amdgcn.raw.buffer.load.ll | 12 +- .../AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.ll | 12 +- llvm/test/CodeGen/AMDGPU/smrd.ll | 24 +- 7 files changed, 909 insertions(+), 25 deletions(-) create mode 100644 llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-loads.ll create mode 100644 llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-stores.ll diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index 2192a72bb27b7..3a67170ccc598 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -328,9 +328,6 @@ bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const { } void AMDGPUDAGToDAGISel::PreprocessISelDAG() { - if (!Subtarget->d16PreservesUnusedBits()) - return; - SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); bool MadeChange = false; @@ -341,8 +338,23 @@ void AMDGPUDAGToDAGISel::PreprocessISelDAG() { switch (N->getOpcode()) { case ISD::BUILD_VECTOR: - // TODO: Match load d16 from shl (extload:i16), 16 - MadeChange |= matchLoadD16FromBuildVector(N); + // D16 optimization requires subtarget support + if (Subtarget->d16PreservesUnusedBits()) { + // TODO: Match load d16 from shl (extload:i16), 16 + MadeChange |= matchLoadD16FromBuildVector(N); + } + break; + case AMDGPUISD::BUFFER_LOAD: + case AMDGPUISD::BUFFER_LOAD_UBYTE: + case AMDGPUISD::BUFFER_LOAD_USHORT: + case AMDGPUISD::BUFFER_LOAD_BYTE: + case AMDGPUISD::BUFFER_LOAD_SHORT: + MadeChange |= sinkUniformAddendIntoSOffset(N, false); + break; + case AMDGPUISD::BUFFER_STORE: + case AMDGPUISD::BUFFER_STORE_BYTE: + case AMDGPUISD::BUFFER_STORE_SHORT: + MadeChange |= sinkUniformAddendIntoSOffset(N, true); break; default: break; @@ -356,6 +368,72 @@ void AMDGPUDAGToDAGISel::PreprocessISelDAG() { } } +/// Sink uniform addends in buffer address calculations into SOffset. +/// +/// Transforms buffer loads/stores with voffset = add(uniform, divergent) +/// into voffset = divergent, soffset = uniform for better address coalescing. +/// Only applies when the result will use OFFEN addressing mode.
+bool AMDGPUDAGToDAGISel::sinkUniformAddendIntoSOffset(SDNode *N, bool IsStore) { + + // Buffer operand layout: + // Load: (chain, rsrc, vindex, voffset, soffset, offset, cachepolicy, idxen) + // Store: (chain, vdata, rsrc, vindex, voffset, soffset, offset, cachepolicy, idxen) + const unsigned VIndexIdx = IsStore ? 3 : 2; + const unsigned VOffsetIdx = IsStore ? 4 : 3; + const unsigned SOffsetIdx = IsStore ? 5 : 4; + const unsigned IdxEnIdx = IsStore ? 8 : 7; + + if (N->getNumOperands() <= IdxEnIdx) + return false; + + SDValue VIndex = N->getOperand(VIndexIdx); + SDValue VOffset = N->getOperand(VOffsetIdx); + SDValue SOffset = N->getOperand(SOffsetIdx); + SDValue IdxEn = N->getOperand(IdxEnIdx); + + // Only optimize OFFEN mode: vindex=0, idxen=0 guarantees this + if (!isNullConstant(VIndex) || !isNullConstant(IdxEn)) + return false; + + // Only optimize when soffset is currently zero + // TODO: Handle non-zero soffset by combining with uniform addend + if (!isNullConstant(SOffset)) + return false; + + // voffset must be ADD of uniform and divergent values + if (VOffset.getOpcode() != ISD::ADD) + return false; + + // Identify uniform and divergent addends + auto IsUniform = [](SDValue V) { + return isa<ConstantSDNode>(V) || !V.getNode()->isDivergent(); + }; + + SDValue LHS = VOffset.getOperand(0); + SDValue RHS = VOffset.getOperand(1); + bool LHSUniform = IsUniform(LHS); + bool RHSUniform = IsUniform(RHS); + + // Need exactly one uniform and one divergent operand + if (LHSUniform == RHSUniform) + return false; + + SDValue UniformAddend = LHSUniform ? LHS : RHS; + SDValue DivergentAddend = LHSUniform ? RHS : LHS; + + // Perform the transformation: sink uniform part into soffset + // SIFixSGPRCopies will handle any SGPR register class fixups if needed. + SmallVector<SDValue, 9> NewOps(N->op_values()); + NewOps[VOffsetIdx] = DivergentAddend; + NewOps[SOffsetIdx] = UniformAddend; + + LLVM_DEBUG(dbgs() << "Sinking uniform addend into SOffset for buffer " + << (IsStore ? 
"store" : "load") << '\n'); + + CurDAG->UpdateNodeOperands(N, NewOps); + return true; +} + bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N) const { if (N->isUndef()) return true; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h index 4fa0d3f72e1c7..8b4e8803955e3 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h @@ -81,6 +81,7 @@ class AMDGPUDAGToDAGISel : public SelectionDAGISel { bool runOnMachineFunction(MachineFunction &MF) override; bool matchLoadD16FromBuildVector(SDNode *N) const; + bool sinkUniformAddendIntoSOffset(SDNode *N, bool IsStore); void PreprocessISelDAG() override; void Select(SDNode *N) override; void PostprocessISelDAG() override; diff --git a/llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-loads.ll b/llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-loads.ll new file mode 100644 index 0000000000000..a77768c631e3c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-loads.ll @@ -0,0 +1,398 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -global-isel=0 < %s | FileCheck -check-prefixes=CHECK,GFX900 %s + +; Test comprehensive patterns for ADD(divergent, uniform) optimization in buffer loads + +; Basic workitem.id.x + uniform +define amdgpu_kernel void @test_basic_workitem_uniform(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_basic_workitem_uniform: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Reversed operands (uniform + divergent) +define amdgpu_kernel void @test_reversed_operands(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_reversed_operands: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %soffset, %voffset ; Reversed: uniform + divergent + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Multiple buffer loads with same pattern +define amdgpu_kernel void @test_multiple_loads(ptr addrspace(1) %output, i32 %soffset1, i32 %soffset2) { +; CHECK-LABEL: test_multiple_loads: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v1, v0, s[4:7], s2 offen +; CHECK-NEXT: 
buffer_load_dword v2, v0, s[4:7], s3 offen +; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_add_u32_e32 v1, v1, v2 +; CHECK-NEXT: global_store_dword v0, v1, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + + %sum1 = add i32 %voffset, %soffset1 + %val1 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum1, i32 0, i32 0) + + %sum2 = add i32 %voffset, %soffset2 + %val2 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum2, i32 0, i32 0) + + %result = add i32 %val1, %val2 + store i32 %result, ptr addrspace(1) %output + ret void +} + +; Different buffer load variants - byte load +define amdgpu_kernel void @test_buffer_load_byte(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_buffer_load_byte: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_ubyte v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + %ext = zext i8 %val to i32 + store i32 %ext, ptr addrspace(1) %output + ret void +} + +; Different buffer load variants - short load +define amdgpu_kernel void @test_buffer_load_short(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_buffer_load_short: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_ushort v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call i16 @llvm.amdgcn.raw.buffer.load.i16(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + %ext = zext i16 %val to i32 + store i32 %ext, ptr addrspace(1) %output + ret void +} + +; Vector loads - v2i32 +define amdgpu_kernel void @test_buffer_load_v2i32(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_buffer_load_v2i32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v2, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dwordx2 v[0:1], v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call <2 x i32> @llvm.amdgcn.raw.buffer.load.v2i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store <2 x i32> %val, ptr addrspace(1) %output + ret void +} + +; Vector loads - v4i32 +define amdgpu_kernel void @test_buffer_load_v4i32(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_buffer_load_v4i32: +; CHECK: ; %bb.0: +; CHECK-NEXT: 
s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v4, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store <4 x i32> %val, ptr addrspace(1) %output + ret void +} + +; Float loads +define amdgpu_kernel void @test_buffer_load_float(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_buffer_load_float: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store float %val, ptr addrspace(1) %output + ret void +} + +; Complex divergent expression + uniform +define amdgpu_kernel void @test_complex_divergent(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_complex_divergent: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: v_add_u32_e32 v0, v0, v1 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %tid_x = call i32 @llvm.amdgcn.workitem.id.x() + %tid_y = call i32 @llvm.amdgcn.workitem.id.y() + %divergent = add i32 %tid_x, %tid_y ; Still divergent + %sum = add i32 %divergent, %soffset ; divergent + uniform + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Should NOT optimize - both operands divergent +define amdgpu_kernel void @test_both_divergent(ptr addrspace(1) %output) { +; CHECK-LABEL: test_both_divergent: +; CHECK: ; %bb.0: +; CHECK-NEXT: v_add_u32_e32 v0, v0, v1 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %tid_x = call i32 @llvm.amdgcn.workitem.id.x() + %tid_y = call i32 @llvm.amdgcn.workitem.id.y() + %sum = add i32 %tid_x, %tid_y + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Should NOT optimize - both operands uniform +define amdgpu_kernel void @test_both_uniform(ptr addrspace(1) %output, i32 %soffset1, i32 
%soffset2) { +; CHECK-LABEL: test_both_uniform: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_add_i32 s2, s2, s3 +; CHECK-NEXT: v_mov_b32_e32 v0, s2 +; CHECK-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %sum = add i32 %soffset1, %soffset2 + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Nested in control flow +define amdgpu_kernel void @test_control_flow(ptr addrspace(1) %output, i32 %soffset, i32 %condition) { +; CHECK-LABEL: test_control_flow: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_cmp_lg_u32 s3, 0 +; CHECK-NEXT: s_cbranch_scc0 .LBB11_4 +; CHECK-NEXT: ; %bb.1: ; %else +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: global_store_dword v1, v1, s[0:1] +; CHECK-NEXT: s_cbranch_execnz .LBB11_3 +; CHECK-NEXT: .LBB11_2: ; %then +; CHECK-NEXT: buffer_load_dword v0, v0, s[4:7], s2 offen +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: .LBB11_3: ; %end +; CHECK-NEXT: s_endpgm +; CHECK-NEXT: .LBB11_4: +; CHECK-NEXT: s_branch .LBB11_2 + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %cmp = icmp eq i32 %condition, 0 + br i1 %cmp, label %then, label %else + +then: + %sum = add i32 %voffset, %soffset + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + br label %end + +else: + store i32 0, ptr addrspace(1) %output + br label %end + +end: + ret void +} + +; Multiple uses of the ADD result - should still optimize buffer load +define amdgpu_kernel void @test_multiple_uses(ptr addrspace(1) %output1, ptr addrspace(1) %output2, i32 %soffset) { +; CHECK-LABEL: test_multiple_uses: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x34 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v2, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v1, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: v_add_u32_e32 v0, s6, v0 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v2, v1, s[0:1] +; CHECK-NEXT: global_store_dword v2, v0, s[2:3] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output1 + store i32 %sum, ptr addrspace(1) %output2 + ret void +} + +; Chain of operations - workitem.id -> mul -> add -> buffer_load +define amdgpu_kernel void @test_operation_chain(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_operation_chain: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: v_mul_u32_u24_e32 v0, 4, v0 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: buffer_load_dword v0, v0, 
s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %scaled = mul i32 %tid, 4 ; Still divergent + %sum = add i32 %scaled, %soffset ; divergent + uniform + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Should NOT optimize - Buffer load with non-zero soffset field already +define amdgpu_kernel void @test_existing_soffset(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_existing_soffset: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: v_add_u32_e32 v0, s6, v0 +; CHECK-NEXT: s_movk_i32 s6, 0x64 +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], s6 offen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 100, i32 0) ; Non-zero soffset + store i32 %val, ptr addrspace(1) %output + ret void +} + +; Should NOT optimize - Structured buffer loads +define amdgpu_kernel void @test_struct_buffer_load(ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_struct_buffer_load: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dword s6, s[4:5], 0x2c +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: v_add_u32_e32 v0, s6, v0 +; CHECK-NEXT: buffer_load_dword v0, v0, s[0:3], 0 idxen +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_store_dword v1, v0, s[0:1] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = call i32 @llvm.amdgcn.struct.buffer.load.i32(<4 x i32> %desc, i32 %sum, i32 0, i32 0, i32 0) + store i32 %val, ptr addrspace(1) %output + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GFX900: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-stores.ll b/llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-stores.ll new file mode 100644 index 0000000000000..da90699d8bdae --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/buffer-offset-to-soffset-stores.ll @@ -0,0 +1,399 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -global-isel=0 < %s | FileCheck -check-prefixes=CHECK,GFX900 %s + +; Test comprehensive patterns for ADD(divergent, uniform) optimization in buffer stores + +; Basic workitem.id.x + uniform for store +define amdgpu_kernel void @test_basic_workitem_uniform_store(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_basic_workitem_uniform_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Reversed operands (uniform + divergent) for store +define amdgpu_kernel void @test_reversed_operands_store(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_reversed_operands_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %soffset, %voffset ; Reversed: uniform + divergent + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Multiple buffer stores with same pattern +define amdgpu_kernel void @test_multiple_stores(ptr addrspace(1) %input, i32 %soffset1, i32 %soffset2) { +; CHECK-LABEL: test_multiple_stores: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[4:7], s2 offen +; CHECK-NEXT: v_add_u32_e32 v1, 10, v1 +; CHECK-NEXT: buffer_store_dword v1, v0, s[4:7], s3 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %val = load i32, ptr addrspace(1) %input + + %sum1 = add i32 %voffset, %soffset1 + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum1, i32 0, i32 0) + + %sum2 = add i32 %voffset, %soffset2 + %val2 = add i32 %val, 10 + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val2, <4 x i32> %desc, i32 %sum2, i32 0, i32 0) + + ret void +} + +; Different 
buffer store variants - byte store +define amdgpu_kernel void @test_buffer_store_byte(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_buffer_store_byte: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_byte v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load i32, ptr addrspace(1) %input + %trunc = trunc i32 %val to i8 + call void @llvm.amdgcn.raw.buffer.store.i8(i8 %trunc, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Different buffer store variants - short store +define amdgpu_kernel void @test_buffer_store_short(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_buffer_store_short: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_short v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load i32, ptr addrspace(1) %input + %trunc = trunc i32 %val to i16 + call void @llvm.amdgcn.raw.buffer.store.i16(i16 %trunc, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Vector stores - v2i32 +define amdgpu_kernel void @test_buffer_store_v2i32(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_buffer_store_v2i32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx2 v[1:2], v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dwordx2 v[1:2], v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load <2 x i32>, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32> %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Vector stores - v4i32 +define amdgpu_kernel void @test_buffer_store_v4i32(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_buffer_store_v4i32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[1:4], v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dwordx4 v[1:4], v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load <4 x i32>, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.v4i32(<4 x i32> %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Float stores +define 
amdgpu_kernel void @test_buffer_store_float(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_buffer_store_float: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load float, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Complex divergent expression + uniform for store +define amdgpu_kernel void @test_complex_divergent_store(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_complex_divergent_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v2, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: v_add_u32_e32 v0, v0, v1 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v2, v2, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %tid_x = call i32 @llvm.amdgcn.workitem.id.x() + %tid_y = call i32 @llvm.amdgcn.workitem.id.y() + %divergent = add i32 %tid_x, %tid_y ; Still divergent + %sum = add i32 %divergent, %soffset ; divergent + uniform + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Should NOT optimize - both operands divergent +define amdgpu_kernel void @test_both_divergent_store(ptr addrspace(1) %input) { +; CHECK-LABEL: test_both_divergent_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v2, 0 +; CHECK-NEXT: v_add_u32_e32 v0, v0, v1 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v2, v2, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %tid_x = call i32 @llvm.amdgcn.workitem.id.x() + %tid_y = call i32 @llvm.amdgcn.workitem.id.y() + %sum = add i32 %tid_x, %tid_y + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Should NOT optimize - both operands uniform +define amdgpu_kernel void @test_both_uniform_store(ptr addrspace(1) %input, i32 %soffset1, i32 %soffset2) { +; CHECK-LABEL: test_both_uniform_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v0, v0, s[0:1] +; CHECK-NEXT: s_add_i32 s0, s2, s3 +; CHECK-NEXT: v_mov_b32_e32 v1, s0 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %sum = add i32 %soffset1, %soffset2 + %val = load i32, ptr addrspace(1) %input + call void 
@llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret void +} + +; Nested in control flow +define amdgpu_kernel void @test_control_flow_store(ptr addrspace(1) %input, i32 %soffset, i32 %condition) { +; CHECK-LABEL: test_control_flow_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: s_cmp_lg_u32 s3, 0 +; CHECK-NEXT: s_cbranch_scc0 .LBB11_4 +; CHECK-NEXT: ; %bb.1: ; %else +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[4:7], 0 offen +; CHECK-NEXT: s_cbranch_execnz .LBB11_3 +; CHECK-NEXT: .LBB11_2: ; %then +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[4:7], s2 offen +; CHECK-NEXT: .LBB11_3: ; %end +; CHECK-NEXT: s_endpgm +; CHECK-NEXT: .LBB11_4: +; CHECK-NEXT: s_branch .LBB11_2 + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %val = load i32, ptr addrspace(1) %input + %cmp = icmp eq i32 %condition, 0 + br i1 %cmp, label %then, label %else + +then: + %sum = add i32 %voffset, %soffset + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + br label %end + +else: + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %voffset, i32 0, i32 0) + br label %end + +end: + ret void +} + +; Multiple uses of the ADD result - should still optimize buffer store +define amdgpu_kernel void @test_multiple_uses_store(ptr addrspace(1) %input, ptr addrspace(1) %output, i32 %soffset) { +; CHECK-LABEL: test_multiple_uses_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; CHECK-NEXT: s_load_dword s8, s[4:5], 0x34 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v2, v1, s[0:1] +; CHECK-NEXT: v_add_u32_e32 v3, s8, v0 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v2, v0, s[4:7], s8 offen +; CHECK-NEXT: global_store_dword v1, v3, s[2:3] +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + store i32 %sum, ptr addrspace(1) %output + ret void +} + +; Chain of operations - workitem.id -> mul -> add -> buffer_store +define amdgpu_kernel void @test_operation_chain_store(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_operation_chain_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: v_mul_u32_u24_e32 v0, 4, v0 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %scaled = mul i32 %tid, 4 ; Still divergent + %sum = add i32 %scaled, %soffset ; divergent + uniform + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0) + ret 
void +} + +; Should NOT optimize - Buffer store with non-zero soffset field already +define amdgpu_kernel void @test_existing_soffset_store(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_existing_soffset_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_add_u32_e32 v0, s4, v0 +; CHECK-NEXT: s_movk_i32 s4, 0x64 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], s4 offen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 100, i32 0) ; Non-zero soffset + ret void +} + +; Should NOT optimize - Structured buffer stores +define amdgpu_kernel void @test_struct_buffer_store(ptr addrspace(1) %input, i32 %soffset) { +; CHECK-LABEL: test_struct_buffer_store: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_load_dword s4, s[4:5], 0x2c +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dword v1, v1, s[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: v_add_u32_e32 v0, s4, v0 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: buffer_store_dword v1, v0, s[0:3], 0 idxen +; CHECK-NEXT: s_endpgm + %desc = call <4 x i32> asm "", "=s"() + %voffset = call i32 @llvm.amdgcn.workitem.id.x() + %sum = add i32 %voffset, %soffset + %val = load i32, ptr addrspace(1) %input + call void @llvm.amdgcn.struct.buffer.store.i32(i32 %val, <4 x i32> %desc, i32 %sum, i32 0, i32 0, i32 0) + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GFX900: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll index e6a59f43ad690..f1fa359d6ff9c 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.ll @@ -435,17 +435,21 @@ main_body: } define amdgpu_ps <4 x float> @buffer_load_negative_offset(<4 x i32> inreg, i32 %ofs) { +; PREGFX10-LABEL: buffer_load_negative_offset: +; PREGFX10: ; %bb.0: ; %main_body +; PREGFX10-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], -16 offen +; PREGFX10-NEXT: s_waitcnt vmcnt(0) +; PREGFX10-NEXT: ; return to shader part epilog +; ; GFX10-LABEL: buffer_load_negative_offset: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_add_nc_u32_e32 v0, -16, v0 -; GFX10-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], 0 offen +; GFX10-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], -16 offen ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: buffer_load_negative_offset: ; GFX11: ; %bb.0: ; %main_body -; GFX11-NEXT: v_add_nc_u32_e32 v0, -16, v0 -; GFX11-NEXT: buffer_load_b128 v[0:3], v0, s[0:3], 0 offen +; GFX11-NEXT: buffer_load_b128 v[0:3], v0, s[0:3], -16 offen ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: ; return to shader part epilog ; diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.ll index b5d741b99c582..4a5303bf63b86 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.ll @@ -383,17 +383,21 @@ main_body: } define amdgpu_ps <4 x float> @buffer_load_negative_offset(ptr addrspace(8) inreg, i32 %ofs) { +; PREGFX10-LABEL: buffer_load_negative_offset: +; PREGFX10: ; %bb.0: ; %main_body +; PREGFX10-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], -16 offen +; PREGFX10-NEXT: s_waitcnt vmcnt(0) +; PREGFX10-NEXT: ; return to shader part epilog +; ; GFX10-LABEL: buffer_load_negative_offset: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_add_nc_u32_e32 v0, -16, v0 -; GFX10-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], 0 offen +; GFX10-NEXT: buffer_load_dwordx4 v[0:3], v0, s[0:3], -16 offen ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: buffer_load_negative_offset: ; GFX11: ; %bb.0: ; %main_body -; GFX11-NEXT: v_add_nc_u32_e32 v0, -16, v0 -; GFX11-NEXT: buffer_load_b128 v[0:3], v0, s[0:3], 0 offen +; GFX11-NEXT: buffer_load_b128 v[0:3], v0, s[0:3], -16 offen ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: ; return to shader part epilog main_body: diff --git a/llvm/test/CodeGen/AMDGPU/smrd.ll b/llvm/test/CodeGen/AMDGPU/smrd.ll index 0c3b7983442f5..839f3971c2591 100644 --- a/llvm/test/CodeGen/AMDGPU/smrd.ll +++ b/llvm/test/CodeGen/AMDGPU/smrd.ll @@ -359,8 +359,8 @@ main_body: ; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large: ; GCN-NEXT: %bb. 
-; SICI-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0 -; SICI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ; +; SICI-NEXT: s_movk_i32 s{{[0-9]+}}, 0x1000 +; SICI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], s{{[0-9]+}} offen ; ; VIGFX9_10-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 4 offen offset:4092 ; define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 { main_body: @@ -568,11 +568,11 @@ main_body: } ; GCN-LABEL: {{^}}smrd_load_nonconst4: -; SICI: v_add_i32_e32 v{{[0-9]+}}, vcc, 0xff8, v0 ; -; SICI-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 0 offen ; -; SICI-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 0 offen offset:16 ; -; SICI-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 0 offen offset:32 ; -; SICI-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], 0 offen offset:48 ; +; SICI: s_movk_i32 s{{[0-9]+}}, 0xff8 +; SICI-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen ; +; SICI-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen offset:16 ; +; SICI-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen offset:32 ; +; SICI-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen offset:48 ; ; VIGFX9_10-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 56 offen offset:4032 ; ; VIGFX9_10-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 56 offen offset:4048 ; ; VIGFX9_10-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 56 offen offset:4064 ; @@ -587,11 +587,11 @@ main_body: } ; GCN-LABEL: {{^}}smrd_load_nonconst5: -; SICI: v_add_i32_e32 v{{[0-9]+}}, vcc, 0x1004, v0 -; SICI-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 0 offen ; -; SICI-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 0 offen offset:16 ; -; SICI-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 0 offen offset:32 ; -; SICI-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], 0 offen offset:48 ; +; SICI: s_movk_i32 s{{[0-9]+}}, 0x1004 +; SICI-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen ; +; SICI-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen offset:16 ; +; SICI-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen offset:32 ; +; SICI-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], s{{[0-9]+}} offen offset:48 ; ; VIGFX9_10: s_movk_i32 s4, 0xfc0 ; VIGFX9_10-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], s4 offen offset:68 ; ; VIGFX9_10-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], s4 offen offset:84 ;