[AMDGPU] Add wave reduce intrinsics for double types - 1 #170811
Status: Open
easyonaadit wants to merge 1 commit into main from users/easyonaadit/amdgpu/wave-reduce-intrinsics-double-min-max
+2,627 −1
Conversation
easyonaadit (Contributor, Author): This stack of pull requests is managed by Graphite. Learn more about stacking.
Supported Ops: `min`, `max`
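For reference, here is a minimal host-side model of what the f64 fmax reduction computes: fold the op across all active lanes, starting from a qNaN identity, and produce one wave-uniform result. This is a sketch under an assumed wave64 exec mask; the function name and lane array are illustrative, not backend code.

```cpp
#include <bit>
#include <cmath>
#include <cstdint>

// Hypothetical model of llvm.amdgcn.wave.reduce.fmax on doubles: fold fmax
// across all active lanes, starting from the qNaN identity, and produce one
// wave-uniform result.
double waveReduceFMax(const double lane[64], uint64_t exec) {
  double acc = std::bit_cast<double>(uint64_t{0x7FF8000000000000}); // qNaN
  for (int l = 0; l < 64; ++l)
    if (exec & (uint64_t{1} << l))
      acc = std::fmax(acc, lane[l]); // fmax(qNaN, x) == x (IEEE maxNum)
  return acc;
}
```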
Force-pushed from fbdfe55 to e163cb4.
Member:
@llvm/pr-subscribers-backend-amdgpu

Author: Aaditya (easyonaadit)

Changes: Supported Ops: `min`, `max`

Patch is 133.26 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/170811.diff

4 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 301f2fc8dab45..80978c6a00a9c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5522,6 +5522,9 @@ static uint64_t getIdentityValueFor64BitWaveReduction(unsigned Opc) {
return std::numeric_limits<uint64_t>::min();
case AMDGPU::V_CMP_GT_I64_e64: // max.i64
return std::numeric_limits<int64_t>::min();
+ case AMDGPU::V_MIN_F64_e64:
+ case AMDGPU::V_MAX_F64_e64:
+ return 0x7FF8000000000000; // qNAN
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO:
case AMDGPU::S_OR_B64:
@@ -5547,7 +5550,8 @@ static bool is32bitWaveReduceOperation(unsigned Opc) {
static bool isFloatingPointWaveReduceOperation(unsigned Opc) {
return Opc == AMDGPU::V_MIN_F32_e64 || Opc == AMDGPU::V_MAX_F32_e64 ||
- Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64;
+ Opc == AMDGPU::V_ADD_F32_e64 || Opc == AMDGPU::V_SUB_F32_e64 ||
+ Opc == AMDGPU::V_MIN_F64_e64 || Opc == AMDGPU::V_MAX_F64_e64;
}
static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
@@ -5583,6 +5587,8 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
case AMDGPU::V_CMP_LT_I64_e64: // min
case AMDGPU::V_CMP_GT_U64_e64: // umax
case AMDGPU::V_CMP_GT_I64_e64: // max
+ case AMDGPU::V_MIN_F64_e64:
+ case AMDGPU::V_MAX_F64_e64:
case AMDGPU::S_AND_B64:
case AMDGPU::S_OR_B64: {
// Idempotent operations.
@@ -5952,6 +5958,60 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
.addReg(Accumulator->getOperand(0).getReg());
break;
}
+ case AMDGPU::V_MIN_F64_e64:
+ case AMDGPU::V_MAX_F64_e64: {
+ const TargetRegisterClass *VregRC = TRI->getVGPR64Class();
+ const TargetRegisterClass *VregSubRC =
+ TRI->getSubRegisterClass(VregRC, AMDGPU::sub0);
+ Register AccumulatorVReg = MRI.createVirtualRegister(VregRC);
+ Register DstVreg = MRI.createVirtualRegister(VregRC);
+ Register LaneValLo =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+ Register LaneValHi =
+ MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+ BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_MOV_B64_PSEUDO),
+ AccumulatorVReg)
+ .addReg(Accumulator->getOperand(0).getReg());
+ if (ST.getGeneration() == AMDGPUSubtarget::Generation::GFX12) {
+ switch (Opc) {
+ case AMDGPU::V_MIN_F64_e64:
+ Opc = AMDGPU::V_MIN_NUM_F64_e64;
+ break;
+ case AMDGPU::V_MAX_F64_e64:
+ Opc = AMDGPU::V_MAX_NUM_F64_e64;
+ break;
+ }
+ }
+ auto DstVregInst = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstVreg)
+ .addImm(0) // src0 modifiers
+ .addReg(LaneValue->getOperand(0).getReg())
+ .addImm(0) // src1 modifiers
+ .addReg(AccumulatorVReg)
+ .addImm(0) // clamp
+ .addImm(0); // omod
+ auto ReadLaneLo =
+ BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
+ LaneValLo);
+ auto ReadLaneHi =
+ BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
+ LaneValHi);
+ MachineBasicBlock::iterator Iters = *ReadLaneLo;
+ MachineOperand Op1L =
+ TII->buildExtractSubRegOrImm(Iters, MRI, DstVregInst->getOperand(0),
+ VregRC, AMDGPU::sub0, VregSubRC);
+ MachineOperand Op1H =
+ TII->buildExtractSubRegOrImm(Iters, MRI, DstVregInst->getOperand(0),
+ VregRC, AMDGPU::sub1, VregSubRC);
+ ReadLaneLo.add(Op1L);
+ ReadLaneHi.add(Op1H);
+ NewAccumulator = BuildMI(*ComputeLoop, I, DL,
+ TII->get(TargetOpcode::REG_SEQUENCE), DstReg)
+ .addReg(LaneValLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(LaneValHi)
+ .addImm(AMDGPU::sub1);
+ break;
+ }
case AMDGPU::S_ADD_U64_PSEUDO:
case AMDGPU::S_SUB_U64_PSEUDO: {
NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg)
@@ -6009,6 +6069,8 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_LT_I64_e64);
case AMDGPU::WAVE_REDUCE_FMIN_PSEUDO_F32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_MIN_F32_e64);
+ case AMDGPU::WAVE_REDUCE_FMIN_PSEUDO_F64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_MIN_F64_e64);
case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO_U32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_U32);
case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO_U64:
@@ -6019,6 +6081,8 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_CMP_GT_I64_e64);
case AMDGPU::WAVE_REDUCE_FMAX_PSEUDO_F32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_MAX_F32_e64);
+ case AMDGPU::WAVE_REDUCE_FMAX_PSEUDO_F64:
+ return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::V_MAX_F64_e64);
case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_I32:
return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_ADD_I32);
case AMDGPU::WAVE_REDUCE_ADD_PSEUDO_U64:
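Why a qNaN bit pattern serves as the identity in getIdentityValueFor64BitWaveReduction: under IEEE-754 minNum/maxNum semantics, which the hardware min/max ops are expected to follow here and which std::fmin/std::fmax mirror, a quiet-NaN operand yields the other operand. A standalone check, as an illustrative assumption rather than part of the patch:

```cpp
#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // 0x7FF8000000000000 is the canonical quiet NaN for binary64.
  double qnan = std::bit_cast<double>(uint64_t{0x7FF8000000000000});
  assert(std::isnan(qnan));
  // minNum/maxNum return the non-NaN operand, so qNaN is an identity
  // for both the fmin and fmax reductions.
  assert(std::fmax(qnan, -1.5) == -1.5);
  assert(std::fmin(qnan, -1.5) == -1.5);
}
```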
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index ca5a4d7301bda..82a83e12649fb 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -373,7 +373,9 @@ defvar Operations = [
WaveReduceOp<"xor", "B64", i64, SGPR_64, VSrc_b64>,
WaveReduceOp<"fmin", "F32", f32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"fmin", "F64", f64, SGPR_64, VSrc_b64>,
WaveReduceOp<"fmax", "F32", f32, SGPR_32, VSrc_b32>,
+ WaveReduceOp<"fmax", "F64", f64, SGPR_64, VSrc_b64>,
WaveReduceOp<"fadd", "F32", f32, SGPR_32, VSrc_b32>,
WaveReduceOp<"fsub", "F32", f32, SGPR_32, VSrc_b32>,
];
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fmax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fmax.ll
index f02fd876f1aac..ca4091bcd3366 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.fmax.ll
@@ -11,6 +11,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1164GISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1132DAGISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1132GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -global-isel=0 < %s | FileCheck -check-prefixes=GFX12DAGISEL %s
define amdgpu_kernel void @uniform_value_float(ptr addrspace(1) %out, float %in) {
@@ -119,6 +120,14 @@ define amdgpu_kernel void @uniform_value_float(ptr addrspace(1) %out, float %in)
; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value_float:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX12DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
entry:
%result = call float @llvm.amdgcn.wave.reduce.fmax(float %in, i32 1)
store float %result, ptr addrspace(1) %out
@@ -357,6 +366,33 @@ define void @divergent_value_float(ptr addrspace(1) %out, float %in) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, s1
; GFX1132GISEL-NEXT: global_store_b32 v[0:1], v2, off
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_value_float:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s1, 0x7fc00000
+; GFX12DAGISEL-NEXT: .LBB1_1: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s2, s0
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: v_readlane_b32 s3, v2, s2
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s0, s2
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s0, 0
+; GFX12DAGISEL-NEXT: v_max_num_f32_e64 v3, s1, s3
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s1, v3
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB1_1
+; GFX12DAGISEL-NEXT: ; %bb.2:
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v2, s1
+; GFX12DAGISEL-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12DAGISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%result = call float @llvm.amdgcn.wave.reduce.fmax(float %in, i32 1)
store float %result, ptr addrspace(1) %out
@@ -905,6 +941,68 @@ define void @divergent_cfg_float(ptr addrspace(1) %out, float %in, float %in2) {
; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, s0
; GFX1132GISEL-NEXT: global_store_b32 v[0:1], v2, off
; GFX1132GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12DAGISEL-LABEL: divergent_cfg_float:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_expcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_samplecnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_bvhcnt 0x0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v31
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v4
+; GFX12DAGISEL-NEXT: ; implicit-def: $vgpr4
+; GFX12DAGISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB2_4
+; GFX12DAGISEL-NEXT: ; %bb.1: ; %else
+; GFX12DAGISEL-NEXT: s_mov_b32 s1, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, 0x7fc00000
+; GFX12DAGISEL-NEXT: .LBB2_2: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: v_readlane_b32 s4, v2, s3
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s1, s3
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12DAGISEL-NEXT: v_max_num_f32_e64 v3, s2, s4
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s2, v3
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB2_2
+; GFX12DAGISEL-NEXT: ; %bb.3:
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v4, s2
+; GFX12DAGISEL-NEXT: ; implicit-def: $vgpr3
+; GFX12DAGISEL-NEXT: .LBB2_4: ; %Flow
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX12DAGISEL-NEXT: s_cbranch_execz .LBB2_8
+; GFX12DAGISEL-NEXT: ; %bb.5: ; %if
+; GFX12DAGISEL-NEXT: s_mov_b32 s1, exec_lo
+; GFX12DAGISEL-NEXT: s_mov_b32 s2, 0x7fc00000
+; GFX12DAGISEL-NEXT: .LBB2_6: ; =>This Inner Loop Header: Depth=1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_ctz_i32_b32 s3, s1
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: v_readlane_b32 s4, v3, s3
+; GFX12DAGISEL-NEXT: s_bitset0_b32 s1, s3
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12DAGISEL-NEXT: v_max_num_f32_e64 v2, s2, s4
+; GFX12DAGISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12DAGISEL-NEXT: v_readfirstlane_b32 s2, v2
+; GFX12DAGISEL-NEXT: s_cbranch_scc1 .LBB2_6
+; GFX12DAGISEL-NEXT: ; %bb.7:
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_va_sdst(0)
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v4, s2
+; GFX12DAGISEL-NEXT: .LBB2_8: ; %endif
+; GFX12DAGISEL-NEXT: s_wait_alu depctr_sa_sdst(0)
+; GFX12DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12DAGISEL-NEXT: global_store_b32 v[0:1], v4, off
+; GFX12DAGISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%d_cmp = icmp ult i32 %tid, 16
@@ -923,6 +1021,1188 @@ endif:
store float %combine, ptr addrspace(1) %out
ret void
}
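In the double tests that follow, each lane's 64-bit value moves between vector and scalar registers as two 32-bit halves: paired v_readlane_b32/v_readfirstlane_b32 ops plus the REG_SEQUENCE in the lowering above. A sketch of that split, with hypothetical names:

```cpp
#include <bit>
#include <cstdint>

// Hypothetical sketch: v_readlane_b32 moves 32 bits at a time, so a double
// is read out of a lane as sub0/sub1 halves, and the final result is rebuilt
// the same way (the REG_SEQUENCE in the lowering above).
struct SRegPair {
  uint32_t lo, hi;
};

SRegPair splitF64(double v) {
  uint64_t bits = std::bit_cast<uint64_t>(v);
  return {static_cast<uint32_t>(bits),        // sub0: v_readfirstlane_b32
          static_cast<uint32_t>(bits >> 32)}; // sub1: v_readfirstlane_b32
}

double joinF64(SRegPair p) {
  return std::bit_cast<double>((uint64_t{p.hi} << 32) | p.lo); // REG_SEQUENCE
}
```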
+
+define amdgpu_kernel void @uniform_value_double(ptr addrspace(1) %out, double %in) {
+; GFX8DAGISEL-LABEL: uniform_value_double:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s3
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_endpgm
+;
+; GFX8GISEL-LABEL: uniform_value_double:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s1
+; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8GISEL-NEXT: s_endpgm
+;
+; GFX9DAGISEL-LABEL: uniform_value_double:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9DAGISEL-NEXT: s_endpgm
+;
+; GFX9GISEL-LABEL: uniform_value_double:
+; GFX9GISEL: ; %bb.0: ; %entry
+; GFX9GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9GISEL-NEXT: s_endpgm
+;
+; GFX10DAGISEL-LABEL: uniform_value_double:
+; GFX10DAGISEL: ; %bb.0: ; %entry
+; GFX10DAGISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10DAGISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DAGISEL-NEXT: s_endpgm
+;
+; GFX10GISEL-LABEL: uniform_value_double:
+; GFX10GISEL: ; %bb.0: ; %entry
+; GFX10GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX10GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX10GISEL-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10GISEL-NEXT: s_endpgm
+;
+; GFX1164DAGISEL-LABEL: uniform_value_double:
+; GFX1164DAGISEL: ; %bb.0: ; %entry
+; GFX1164DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164DAGISEL-NEXT: s_endpgm
+;
+; GFX1164GISEL-LABEL: uniform_value_double:
+; GFX1164GISEL: ; %bb.0: ; %entry
+; GFX1164GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX1164GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1164GISEL-NEXT: s_endpgm
+;
+; GFX1132DAGISEL-LABEL: uniform_value_double:
+; GFX1132DAGISEL: ; %bb.0: ; %entry
+; GFX1132DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132DAGISEL-NEXT: s_endpgm
+;
+; GFX1132GISEL-LABEL: uniform_value_double:
+; GFX1132GISEL: ; %bb.0: ; %entry
+; GFX1132GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132GISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1132GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1132GISEL-NEXT: s_endpgm
+;
+; GFX12DAGISEL-LABEL: uniform_value_double:
+; GFX12DAGISEL: ; %bb.0: ; %entry
+; GFX12DAGISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12DAGISEL-NEXT: v_mov_b32_e32 v2, 0
+; GFX12DAGISEL-NEXT: s_wait_kmcnt 0x0
+; GFX12DAGISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12DAGISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12DAGISEL-NEXT: s_endpgm
+entry:
+ %result = call double @llvm.amdgcn.wave.reduce.fmax(double %in, i32 1)
+ store double %result, ptr addrspace(1) %out
+ ret void
+}
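The uniform case above needs no reduction loop because min and max are idempotent: folding a wave-uniform value into itself returns the value, so codegen is just a store of the input. A one-line illustrative check:

```cpp
#include <cassert>
#include <cmath>

int main() {
  double x = 3.5; // any non-NaN wave-uniform input
  // Idempotence: reducing a uniform value with fmin/fmax yields the value
  // itself, which is why uniform_value_double compiles to plain moves.
  assert(std::fmax(x, x) == x && std::fmin(x, x) == x);
}
```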
+
+define void @divergent_value_double(ptr addrspace(1) %out, double %in) {
+; GFX8DAGISEL-LABEL: divergent_value_double:
+; GFX8DAGISEL: ; %bb.0: ; %entry
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8DAGISEL-NEXT: s_mov_b32 s6, 0
+; GFX8DAGISEL-NEXT: s_mov_b32 s7, 0x7ff80000
+; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8DAGISEL-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v4, s6
+; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v5, s7
+; GFX8DAGISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8DAGISEL-NEXT: v_max_f64 v[4:5], s[8:9], v[4:5]
+; GFX8DAGISEL-NEXT: s_bitset0_b64 s[4:5], s10
+; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s6, v4
+; GFX8DAGISEL-NEXT: v_readfirstlane_b32 s7, v5
+; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB4_1
+; GFX8DAGISEL-NEXT: ; %bb.2:
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX8DAGISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8DAGISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8DAGISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8GISEL-LABEL: divergent_value_double:
+; GFX8GISEL: ; %bb.0: ; %entry
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8GISEL-NEXT: s_mov_b32 s6, 0
+; GFX8GISEL-NEXT: s_mov_b32 s7, 0x7ff80000
+; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec
+; GFX8GISEL-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX8GISEL-NEXT: s_ff1_i32_b64 s10, s[4:5]
+; GFX8GISEL-NEXT: v_mov_b32_e32 v4, s6
+; GFX8GISEL-NEXT: v_readlane_b32 s8, v2, s10
+; GFX8GISEL-NEXT: v_mov_b32_e32 v5, s7
+; GFX8GISEL-NEXT: v_readlane_b32 s9, v3, s10
+; GFX8GISEL-NEXT: v_max_f64 v[4:5], s[8:9], v[4:5]
+; GFX8GISEL-NEXT: s_bitset0_b64 s[4:5], s10
+; GFX8GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX8GISEL-NEXT: v_readfirstlane_b32 s6, v4
+; GFX8GISEL-NEXT: v_readfirstlane_b32 s7, v5
+; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB4_1
+; GFX8GISEL-NEXT: ; %bb.2:
+; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6
+; GFX8GISEL-NEXT: v_mov_b32_e32 v3, s7
+; GFX8GISEL-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX8GISEL-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9DAGISEL-LABEL: divergent_value_double:
+; GFX9DAGISEL: ; %bb.0: ; %entry
+; GFX9DAGISEL-NEXT: s_waitcnt vmcnt(0...
[truncated]