llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (11 additions & 3 deletions)
@@ -4420,10 +4420,18 @@ bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {

bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
  const auto *Ld = cast<LoadSDNode>(N);

  const MachineMemOperand *MMO = Ld->getMemOperand();
-  if (N->isDivergent() && !AMDGPU::isUniformMMO(MMO))
-    return false;
+  if (Ld->isDivergent()) {
+    // FIXME: We ought to be able to take the direct isDivergent result. We
+    // cannot rely on the MMO for a uniformity check, and should stop using
+    // it. This is a hack for 2 ways that the IR divergence analysis is
+    // superior to the DAG divergence: recognizing shift-of-workitem-id as
+    // always uniform, and isSingleLaneExecution. These should be handled in
+    // the DAG version, and then this can be dropped.
+    if (!MMO->getValue() || !AMDGPU::isUniformMMO(MMO))
+      return false;
+  }

  return MMO->getSize().hasValue() &&
         Ld->getAlign() >=
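For readers unfamiliar with the first case the FIXME mentions: the IR uniformity analysis knows that shifting the workitem id right by at least log2 of the wavefront size yields the wave index, which is identical across all lanes of a wave. A minimal sketch of that pattern match follows; the helper name and signature are hypothetical illustrations, not the in-tree code.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Hypothetical sketch: workitem.id.x is divergent, but workitem.id.x >> N is
// wave-uniform once N >= log2(wavefrontsize), because every lane of a wave
// shares the same wave index within the workgroup.
static bool isShiftOfWorkitemId(const Value *V, unsigned WavefrontSizeLog2) {
  using namespace PatternMatch;
  const APInt *ShAmt;
  return match(V, m_LShr(m_Intrinsic<Intrinsic::amdgcn_workitem_id_x>(),
                         m_APInt(ShAmt))) &&
         ShAmt->uge(WavefrontSizeLog2);
}

The generic DAG divergence propagation has no such target-specific pattern, which is why the selector still falls back to the MMO-based check above.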
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp (1 addition & 0 deletions)
@@ -28,6 +28,7 @@ Intrinsic::ID AMDGPU::getIntrinsicID(const MachineInstr &I) {

// TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
bool AMDGPU::isUniformMMO(const MachineMemOperand *MMO) {
+  // FIXME: A null value should be treated as unknown, not as uniform.
  const Value *Ptr = MMO->getValue();
  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
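The FIXME added here flags that isUniformMMO lands in the uniform bucket when the MMO carries no IR value at all. A minimal sketch of the conservative behavior the comment asks for, assuming only what the surrounding lines show; the helper name is hypothetical, not the in-tree implementation.

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

// Hypothetical conservative variant: a null Value* tells us nothing about
// the address, so report "not known uniform" rather than uniform.
static bool isKnownUniformMMO(const MachineMemOperand *MMO) {
  const Value *Ptr = MMO->getValue();
  if (!Ptr)
    return false; // unknown provenance, do not assume uniform
  // Kernel-input loads show up as UndefValue (itself a Constant); kernel
  // arguments, constants, and globals are lane-invariant addresses.
  return isa<Argument>(Ptr) || isa<Constant>(Ptr);
}

Until isUniformMMO itself changes, the new !MMO->getValue() guard at the call site in AMDGPUISelDAGToDAG.cpp provides the same conservative treatment.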
llvm/test/CodeGen/AMDGPU/load-select-ptr.ll (84 additions & 0 deletions)
@@ -139,3 +139,87 @@ define amdgpu_kernel void @select_ptr_crash_i64_local_offsets(i32 %tmp, ptr addr
  store i64 %tmp5, ptr addrspace(1) %ptr2, align 8
  ret void
}

; The resultant load cannot be treated as uniform
define amdgpu_kernel void @sample_test(ptr addrspace(1) %dest, ptr addrspace(1) %sourceA, ptr addrspace(1) %sourceB, i1 %tobool.not.i) #0 {
; GCN-LABEL: sample_test:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x10
; GCN-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GCN-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GCN-NEXT: s_load_dword s2, s[4:5], 0x18
; GCN-NEXT: v_mov_b32_e32 v3, s1
; GCN-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s2, 0
; GCN-NEXT: s_load_dwordx2 s[2:3], s[6:7], 0x0
; GCN-NEXT: s_cselect_b64 vcc, -1, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v4, s3
; GCN-NEXT: v_mov_b32_e32 v5, s2
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
; GCN-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc
; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN-NEXT: s_endpgm
entry:
  %0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %conv2.i.i.i1 = zext i32 %0 to i64
  %arrayidx.i = getelementptr i64, ptr addrspace(1) %sourceA, i64 %conv2.i.i.i1
  %dest.gep = getelementptr i64, ptr addrspace(1) %dest, i64 %conv2.i.i.i1
  %ld0 = load i64, ptr addrspace(1) %arrayidx.i, align 8, !amdgpu.noclobber !0
  %ld1 = load i64, ptr addrspace(1) %sourceB, align 8
  %cond.i = select i1 %tobool.not.i, i64 %ld0, i64 %ld1
  store i64 %cond.i, ptr addrspace(1) %dest.gep, align 8
  ret void
}

; Even with pointers in the constant address space (addrspace 4), the
; resultant load cannot be treated as uniform
define amdgpu_kernel void @constant_is_not_uniform(ptr addrspace(1) %dest, ptr addrspace(4) %sourceA, ptr addrspace(4) %sourceB, i1 %tobool.not.i) #0 {
; GCN-LABEL: constant_is_not_uniform:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x10
; GCN-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GCN-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GCN-NEXT: s_load_dword s2, s[4:5], 0x18
; GCN-NEXT: v_mov_b32_e32 v3, s1
; GCN-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s2, 0
; GCN-NEXT: s_load_dwordx2 s[2:3], s[6:7], 0x0
; GCN-NEXT: s_cselect_b64 vcc, -1, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v4, s3
; GCN-NEXT: v_mov_b32_e32 v5, s2
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
; GCN-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc
; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN-NEXT: s_endpgm
entry:
  %0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %conv2.i.i.i1 = zext i32 %0 to i64
  %arrayidx.i = getelementptr i64, ptr addrspace(4) %sourceA, i64 %conv2.i.i.i1
  %dest.gep = getelementptr i64, ptr addrspace(1) %dest, i64 %conv2.i.i.i1
  %ld0 = load i64, ptr addrspace(4) %arrayidx.i, align 8
  %ld1 = load i64, ptr addrspace(4) %sourceB, align 8
  %cond.i = select i1 %tobool.not.i, i64 %ld0, i64 %ld1
  store i64 %cond.i, ptr addrspace(1) %dest.gep, align 8
  ret void
}

attributes #0 = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" }

!0 = !{}