[AMDGPU] Fix the new atomic optimizer in pixel shaders.
The new atomic optimizer I previously added in D51969 did not work
correctly when a pixel shader used derivatives and therefore had helper
lanes active.

To fix this, we add an llvm.amdgcn.ps.live call whose result guards a
branch around the entire atomic operation, ensuring that all helper lanes
within the wavefront are inactive when we compute our atomic results.
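
As a rough sketch (with hypothetical names, not the pass's exact output),
the guarded shape looks like this in IR: helper lanes branch straight from
the entry block to the exit block, and a PHI hands them an undef result,
which is harmless because nothing a helper lane computes is ever written
back.

declare i1 @llvm.amdgcn.ps.live()
declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1)

define amdgpu_ps i32 @guarded_atomic_shape(<4 x i32> inreg %rsrc) {
entry:
  %live = call i1 @llvm.amdgcn.ps.live()
  br i1 %live, label %atomic, label %exit
atomic:                              ; only non-helper lanes get here
  %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 5, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  br label %exit
exit:                                ; all lanes reconverge here
  %result = phi i32 [ undef, %entry ], [ %old, %atomic ]
  ret i32 %result
}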

I've added a test case that can cause derivatives and exposes the
problem.

Differential Revision: https://reviews.llvm.org/D53930

llvm-svn: 346128
Neil Henning committed Nov 5, 2018
1 parent 2ae1be7 commit 233a02d
Showing 2 changed files with 98 additions and 2 deletions.
llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp: 39 additions, 2 deletions
@@ -53,6 +53,7 @@ class AMDGPUAtomicOptimizer : public FunctionPass,
   const DataLayout *DL;
   DominatorTree *DT;
   bool HasDPP;
+  bool IsPixelShader;
 
   void optimizeAtomic(Instruction &I, Instruction::BinaryOps Op,
                       unsigned ValIdx, bool ValDivergent) const;
@@ -96,6 +97,7 @@ bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
   const TargetMachine &TM = TPC.getTM<TargetMachine>();
   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
   HasDPP = ST.hasDPP();
+  IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;
 
   visit(F);
 
@@ -215,6 +217,31 @@ void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
   // Start building just before the instruction.
   IRBuilder<> B(&I);
 
+  // If we are in a pixel shader, because of how we have to mask out helper
+  // lane invocations, we need to record the entry and exit BBs.
+  BasicBlock *PixelEntryBB = nullptr;
+  BasicBlock *PixelExitBB = nullptr;
+
+  // If we're optimizing an atomic within a pixel shader, we need to wrap the
+  // entire atomic operation in a helper-lane check. We do not want any helper
+  // lanes that are around only for the purposes of derivatives to take part
+  // in any cross-lane communication, and we use a branch on whether the lane
+  // is live to do this.
+  if (IsPixelShader) {
+    // Record I's original position as the entry block.
+    PixelEntryBB = I.getParent();
+
+    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
+    Instruction *const NonHelperTerminator =
+        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);
+
+    // Record I's new position as the exit block.
+    PixelExitBB = I.getParent();
+
+    I.moveBefore(NonHelperTerminator);
+    B.SetInsertPoint(&I);
+  }
+
   Type *const Ty = I.getType();
   const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
   Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);
@@ -398,8 +425,18 @@ void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
   // first lane, to get our lane's index into the atomic result.
   Value *const Result = B.CreateBinOp(Op, BroadcastI, LaneOffset);
 
-  // Replace the original atomic instruction with the new one.
-  I.replaceAllUsesWith(Result);
+  if (IsPixelShader) {
+    // Need a final PHI to reconverge above the helper lane branch mask.
+    B.SetInsertPoint(PixelExitBB->getFirstNonPHI());
+
+    PHINode *const PHI = B.CreatePHI(Ty, 2);
+    PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
+    PHI->addIncoming(Result, I.getParent());
+    I.replaceAllUsesWith(PHI);
+  } else {
+    // Replace the original atomic instruction with the new one.
+    I.replaceAllUsesWith(Result);
+  }
 
   // And delete the original.
   I.eraseFromParent();
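
For context on why live helper lanes break the optimization: the pass
elects one lane to issue the atomic by counting, with mbcnt, the active
lanes below the current one. A minimal sketch of that election, assuming a
constant all-ones mask where the pass uses the real exec mask:

declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)

define i1 @is_first_active_lane() {
  ; Count the active lanes whose index is lower than ours; the lane that
  ; counts zero performs the single atomic for the whole wavefront.
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %first = icmp eq i32 %hi, 0
  ret i1 %first
}

If helper lanes are still live at this point, they are counted and folded
into the combined value, so the single atomic writes back a result they
should never have contributed to. The ps.live guard above makes them
inactive before any of this cross-lane work happens.
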
llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll: 59 additions (new file)
@@ -0,0 +1,59 @@
; RUN: llc -mtriple=amdgcn-- -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX7LESS %s
; RUN: llc -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8MORE %s
; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8MORE %s

declare i1 @llvm.amdgcn.wqm.vote(i1)
declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1)
declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1)

; Show what the atomic optimization pass will do for raw buffers.

; GCN-LABEL: add_i32_constant:
; GCN-LABEL: BB0_1:
; GCN: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_lo:[0-9]+]], s[[exec_lo]], 0
; GCN: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_hi:[0-9]+]], s[[exec_hi]], v[[mbcnt_lo]]
; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc, 0, v[[mbcnt_hi]]
; GCN: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
; GCN: buffer_atomic_add v[[value]]
; GCN: v_readfirstlane_b32 s{{[0-9]+}}, v[[value]]
define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %inout) {
entry:
  %cond1 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
  %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 5, <4 x i32> %inout, i32 0, i32 0, i1 0)
  %cond2 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
  %cond = and i1 %cond1, %cond2
  br i1 %cond, label %if, label %else
if:
  %bitcast = bitcast i32 %old to float
  call void @llvm.amdgcn.buffer.store.f32(float %bitcast, <4 x i32> %out, i32 0, i32 0, i1 0, i1 0)
  ret void
else:
  ret void
}

; GCN-LABEL: add_i32_varying:
; GFX7LESS-NOT: v_mbcnt_lo_u32_b32
; GFX7LESS-NOT: v_mbcnt_hi_u32_b32
; GFX8MORE: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_lo:[0-9]+]], exec_lo, 0
; GFX8MORE: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_hi:[0-9]+]], exec_hi, v[[mbcnt_lo]]
; GFX8MORE: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 63
; GFX8MORE: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc, 0, v[[mbcnt_hi]]
; GFX8MORE: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
; GFX8MORE: buffer_atomic_add v[[value]]
; GFX8MORE: v_readfirstlane_b32 s{{[0-9]+}}, v[[value]]
define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %inout, i32 %val) {
entry:
  %cond1 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
  %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %val, <4 x i32> %inout, i32 0, i32 0, i1 0)
  %cond2 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
  %cond = and i1 %cond1, %cond2
  br i1 %cond, label %if, label %else
if:
  %bitcast = bitcast i32 %old to float
  call void @llvm.amdgcn.buffer.store.f32(float %bitcast, <4 x i32> %out, i32 0, i32 0, i1 0, i1 0)
  ret void
else:
  ret void
}
