[AMDGPU][SelectionDAG] Don't combine uniform multiplies to MUL_[UI]24
Prefer to keep uniform (non-divergent) multiplies on the scalar ALU when
possible. This significantly improves some game cases by eliminating
v_readfirstlane instructions when the result feeds into a scalar
operation, like the address calculation for a scalar load or store.

Since isDivergent is only an approximation of whether a value is in
SGPRs, it can potentially regress some situations where a uniform value
ends up in a VGPR. These should be rare in real code, although the test
changes do contain a number of examples.
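
As a rough model of the new heuristic (a sketch under the assumption that uniformity predicts SGPR placement; the names below are illustrative, not from the patch):

  // Sketch only, not LLVM code: a multiply is narrowed to a 24-bit VALU
  // multiply only when it is divergent, i.e. expected to live in VGPRs
  // anyway; uniform multiplies stay full-width so they can be selected to
  // s_mul_i32 on the scalar ALU, and no v_readfirstlane is needed when the
  // result feeds a scalar address calculation.
  bool narrowToMul24(bool IsDivergent, unsigned SignificantOperandBits) {
    if (!IsDivergent)
      return false;
    return SignificantOperandBits <= 24;
  }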

Most of the test changes are just using s_mul instead of v_mul/mad, which
is generally better for both register pressure and latency (at least on
GFX10, where SGPR pressure doesn't affect occupancy and vector ALU
instructions have significantly longer latency than scalar ALU
instructions). Some R600 tests now use MULLO_INT instead of MUL_UINT24.

GlobalISel appears to handle more scenarios in the desirable way,
although it can also be thrown off and fail to select the 24-bit
multiplies in some cases.

An alternative solution, considered and rejected, was to allow selecting
MUL_[UI]24 to S_MUL_I32. I've rejected this because those SD operations
are defined to treat the most significant 8 bits of their inputs as
don't-care, and this fact is used in some combines via
SimplifyDemandedBits.
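
To make that concrete, a minimal C++ model of the mismatch (reference semantics only, not LLVM code):

  #include <cstdint>

  // mul_u24 reads only the low 24 bits of each operand; s_mul_i32 reads all 32.
  uint32_t mul_u24(uint32_t a, uint32_t b) {
    return (a & 0xffffff) * (b & 0xffffff); // low 32 bits of the product
  }
  uint32_t s_mul_i32(uint32_t a, uint32_t b) { return a * b; }

  // Once a combine has (legally) rewritten the high 8 bits of a mul_u24
  // operand, the two disagree: mul_u24(0xff000001u, 2) == 2, but
  // s_mul_i32(0xff000001u, 2) == 0xfe000002.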

Based on a patch by Nicolai Hähnle.

Differential Revision: https://reviews.llvm.org/D97063
nhaehnle authored and jayfoad committed Feb 23, 2021
1 parent 19c2e12 commit 52bc2e7
Showing 20 changed files with 239 additions and 200 deletions.
7 changes: 7 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3310,6 +3310,13 @@ SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
   EVT VT = N->getValueType(0);
 
+  // Don't generate 24-bit multiplies on values that are in SGPRs, since
+  // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
+  // unnecessarily). isDivergent() is used as an approximation of whether the
+  // value is in an SGPR.
+  if (!N->isDivergent())
+    return SDValue();
+
   unsigned Size = VT.getSizeInBits();
   if (VT.isVector() || Size > 64)
     return SDValue();
50 changes: 26 additions & 24 deletions llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -5798,36 +5798,37 @@ define amdgpu_kernel void @urem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ;
 ; GFX6-LABEL: urem_i32_oddk_denom:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
 ; GFX6-NEXT:    v_mov_b32_e32 v0, 0xb2a50881
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s2, 0x12d8fb
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s0, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GFX6-NEXT:    v_sub_i32_e32 v1, vcc, s4, v0
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v1
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 20, v0
-; GFX6-NEXT:    v_mul_u32_u24_e32 v0, 0x12d8fb, v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: urem_i32_oddk_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0x12d8fb
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_mul_hi_u32 s0, s4, 0xb2a50881
 ; GFX9-NEXT:    s_sub_i32 s1, s4, s0
 ; GFX9-NEXT:    s_lshr_b32 s1, s1, 1
 ; GFX9-NEXT:    s_add_i32 s1, s1, s0
 ; GFX9-NEXT:    s_lshr_b32 s0, s1, 20
-; GFX9-NEXT:    v_mul_u32_u24_e32 v1, s0, v1
-; GFX9-NEXT:    v_sub_u32_e32 v1, s4, v1
+; GFX9-NEXT:    s_mul_i32 s0, s0, 0x12d8fb
+; GFX9-NEXT:    s_sub_i32 s0, s4, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
 ; GFX9-NEXT:    s_endpgm
   %r = urem i32 %x, 1235195
@@ -6642,36 +6643,37 @@ define amdgpu_kernel void @srem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ;
 ; GFX6-LABEL: srem_i32_oddk_denom:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
 ; GFX6-NEXT:    v_mov_b32_e32 v0, 0xd9528441
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_mov_b32 s2, 0x12d8fb
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_mul_hi_i32 v0, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT:    v_mul_hi_i32 v0, s4, v0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
 ; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 31, v0
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v0, 20, v0
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GFX6-NEXT:    v_mul_i32_i24_e32 v0, 0x12d8fb, v0
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: srem_i32_oddk_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x2c
-; GFX9-NEXT:    v_mov_b32_e32 v1, 0x12d8fb
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_mul_hi_i32 s0, s4, 0xd9528441
 ; GFX9-NEXT:    s_add_i32 s0, s0, s4
 ; GFX9-NEXT:    s_lshr_b32 s1, s0, 31
 ; GFX9-NEXT:    s_ashr_i32 s0, s0, 20
 ; GFX9-NEXT:    s_add_i32 s0, s0, s1
-; GFX9-NEXT:    v_mul_i32_i24_e32 v1, s0, v1
-; GFX9-NEXT:    v_sub_u32_e32 v1, s4, v1
+; GFX9-NEXT:    s_mul_i32 s0, s0, 0x12d8fb
+; GFX9-NEXT:    s_sub_i32 s0, s4, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX9-NEXT:    global_store_dword v0, v1, s[2:3]
 ; GFX9-NEXT:    s_endpgm
   %r = srem i32 %x, 1235195
10 changes: 6 additions & 4 deletions llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
@@ -20,8 +20,9 @@ declare i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i32 imm
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: buffer_atomic_add v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: buffer_atomic_add v[[data]]
 define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:
   %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add(i32 5, <4 x i32> %inout, i32 0, i32 0, i32 0)
@@ -122,8 +123,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: buffer_atomic_sub v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: buffer_atomic_sub v[[data]]
 define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, <4 x i32> %inout) {
 entry:
   %old = call i32 @llvm.amdgcn.raw.buffer.atomic.sub(i32 5, <4 x i32> %inout, i32 0, i32 0, i32 0)
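
For context on the checks above: the atomic optimizer turns N active lanes each adding 5 into a single atomic add of popcount(exec) * 5. A sketch of that arithmetic (the function name and types are illustrative, not from LLVM):

  #include <bitset>
  #include <cstdint>

  // The combined addend depends only on the exec mask and a constant, so it
  // is uniform; hence the new s_mul_i32 + v_mov_b32 pattern in the checks,
  // replacing v_mul_u32_u24.
  uint32_t combinedAddend(uint64_t ExecMask, uint32_t PerLaneValue) {
    uint32_t ActiveLanes = std::bitset<64>(ExecMask).count(); // s_bcnt1_i32_b64
    return ActiveLanes * PerLaneValue;                        // s_mul_i32
  }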
20 changes: 12 additions & 8 deletions llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -17,8 +17,9 @@ declare i32 @llvm.amdgcn.workitem.id.x()
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: {{flat|buffer|global}}_atomic_add v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: {{flat|buffer|global}}_atomic_add v[[data]]
 define amdgpu_kernel void @add_i32_constant(i32 addrspace(1)* %out, i32 addrspace(1)* %inout) {
 entry:
   %old = atomicrmw add i32 addrspace(1)* %inout, i32 5 acq_rel
@@ -75,8 +76,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value_lo:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[value_lo:[0-9]+]], s[[value]]
 ; GCN: {{flat|buffer|global}}_atomic_add_x2 v{{\[}}[[value_lo]]:[[value_hi]]{{\]}}
 define amdgpu_kernel void @add_i64_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %inout) {
 entry:
@@ -125,8 +127,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
-; GCN: {{flat|buffer|global}}_atomic_sub v[[value]]
+; GCN: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[data:[0-9]+]], s[[value]]
+; GCN: {{flat|buffer|global}}_atomic_sub v[[data]]
 define amdgpu_kernel void @sub_i32_constant(i32 addrspace(1)* %out, i32 addrspace(1)* %inout) {
 entry:
   %old = atomicrmw sub i32 addrspace(1)* %inout, i32 5 acq_rel
@@ -183,8 +186,9 @@ entry:
 ; GCN: s_and_saveexec_b{{32|64}} s[[exec:\[?[0-9:]+\]?]], vcc
 ; GCN32: s_bcnt1_i32_b32 s[[popcount:[0-9]+]], s[[exec_lo]]
 ; GCN64: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
-; GCN: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
-; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value_lo:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: s_mul_i32 s[[value:[0-9]+]], s[[popcount]], 5
+; GCN-DAG: v_mul_hi_u32_u24{{(_e[0-9]+)?}} v[[value_hi:[0-9]+]], s[[popcount]], 5
+; GCN: v_mov_b32_e32 v[[value_lo:[0-9]+]], s[[value]]
 ; GCN: {{flat|buffer|global}}_atomic_sub_x2 v{{\[}}[[value_lo]]:[[value_hi]]{{\]}}
 define amdgpu_kernel void @sub_i64_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %inout) {
 entry:
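
In the i64 hunks above, only the low half of the 64-bit addend moves to the scalar unit; the high half is still matched as v_mul_hi_u32_u24. A sketch of the split (illustrative only; exact here because both factors fit in 24 bits):

  #include <cstdint>

  // Low 32 bits of popcount * k on the scalar ALU, high 32 bits via a
  // 24-bit high multiply on the vector ALU.
  uint64_t combinedAddend64(uint32_t Popcount, uint32_t K) {
    uint32_t Lo = Popcount * K;                               // s_mul_i32
    uint64_t Prod24 = uint64_t(Popcount & 0xffffff) * (K & 0xffffff);
    uint32_t Hi = uint32_t(Prod24 >> 32);                     // v_mul_hi_u32_u24
    return (uint64_t(Hi) << 32) | Lo;
  }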