diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 875278a3b4f97..c681d12ba7499 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -11944,7 +11944,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || (AS == AMDGPUAS::GLOBAL_ADDRESS && Subtarget->getScalarizeGlobalBehavior() && Load->isSimple() && - isMemOpHasNoClobberedMemOperand(Load))) { + (Load->isInvariant() || isMemOpHasNoClobberedMemOperand(Load)))) { if ((!Op->isDivergent() || AMDGPU::isUniformMMO(MMO)) && Alignment >= Align(4) && NumElements < 32) { if (MemVT.isPow2VectorType() || diff --git a/llvm/test/CodeGen/AMDGPU/load-global-invariant.ll b/llvm/test/CodeGen/AMDGPU/load-global-invariant.ll new file mode 100644 index 0000000000000..6cdadc5bab5fb --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/load-global-invariant.ll @@ -0,0 +1,77 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s + +; Check that invariant global loads are treated like constant +; loads. The optnone disables the annotation of amdgpu.noclobber.
+ +define amdgpu_kernel void @load_constant_v3i64(ptr addrspace(1) %dst, ptr addrspace(4) %src) #0 { +; CHECK-LABEL: load_constant_v3i64: +; CHECK: ; %bb.0: +; CHECK-NEXT: v_mov_b32_e32 v4, 0 +; CHECK-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0 +; CHECK-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: s_load_dwordx2 s[6:7], s[6:7], 0x10 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v0, s6 +; CHECK-NEXT: v_mov_b32_e32 v1, s7 +; CHECK-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5] offset:16 +; CHECK-NEXT: v_mov_b32_e32 v0, s0 +; CHECK-NEXT: v_mov_b32_e32 v1, s1 +; CHECK-NEXT: v_mov_b32_e32 v2, s2 +; CHECK-NEXT: v_mov_b32_e32 v3, s3 +; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[4:5] +; CHECK-NEXT: s_endpgm + %ld = load <3 x i64>, ptr addrspace(4) %src, align 32 + store <3 x i64> %ld, ptr addrspace(1) %dst, align 32 + ret void +} + +define amdgpu_kernel void @load_global_v3i64(ptr addrspace(1) %dst, ptr addrspace(1) %src) #0 { +; CHECK-LABEL: load_global_v3i64: +; CHECK: ; %bb.0: +; CHECK-NEXT: v_mov_b32_e32 v6, 0 +; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0 +; CHECK-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x8 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 v[0:3], v6, s[2:3] +; CHECK-NEXT: global_load_dwordx2 v[4:5], v6, s[2:3] offset:16 +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: global_store_dwordx2 v6, v[4:5], s[0:1] offset:16 +; CHECK-NEXT: global_store_dwordx4 v6, v[0:3], s[0:1] +; CHECK-NEXT: s_endpgm + %ld = load <3 x i64>, ptr addrspace(1) %src, align 32 + store <3 x i64> %ld, ptr addrspace(1) %dst, align 32 + ret void +} + +define amdgpu_kernel void @load_global_v3i64_invariant(ptr addrspace(1) %dst, ptr addrspace(1) %src) #0 { +; CHECK-LABEL: load_global_v3i64_invariant: +; CHECK: ; %bb.0: +; CHECK-NEXT: v_mov_b32_e32 v4, 0 +; CHECK-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0 +; CHECK-NEXT: 
s_load_dwordx2 s[6:7], s[8:9], 0x8 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: s_load_dwordx2 s[6:7], s[6:7], 0x10 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v0, s6 +; CHECK-NEXT: v_mov_b32_e32 v1, s7 +; CHECK-NEXT: global_store_dwordx2 v4, v[0:1], s[4:5] offset:16 +; CHECK-NEXT: v_mov_b32_e32 v0, s0 +; CHECK-NEXT: v_mov_b32_e32 v1, s1 +; CHECK-NEXT: v_mov_b32_e32 v2, s2 +; CHECK-NEXT: v_mov_b32_e32 v3, s3 +; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[4:5] +; CHECK-NEXT: s_endpgm + %ld = load <3 x i64>, ptr addrspace(1) %src, align 32, !invariant.load !0 + store <3 x i64> %ld, ptr addrspace(1) %dst, align 32 + ret void +} + +attributes #0 = { noinline nounwind optnone } + +!0 = !{}