diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp index 73592d52ad04c..10244da126b1e 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp @@ -1312,6 +1312,15 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST, .Any({{UniS16, S32}, {{Sgpr16}, {Sgpr32}}}, hasSALUFloat) .Any({{UniS16, S32}, {{UniInVgprS16}, {Vgpr32}}}, !hasSALUFloat); + addRulesForGOpcs({G_INTRINSIC_FPTRUNC_ROUND}) + .Any({{DivS16, S32}, {{Vgpr16}, {Vgpr32}}}) + .Any({{UniS16, S64}, {{UniInVgprS16}, {Vgpr64}}}) + .Any({{DivS16, S64}, {{Vgpr16}, {Vgpr64}}}) + .Any({{UniS32, S64}, {{UniInVgprS32}, {Vgpr64}}}) + .Any({{DivS32, S64}, {{Vgpr32}, {Vgpr64}}}) + .Any({{UniS16, S32}, {{Sgpr16}, {Sgpr32}}}, hasSALUFloat) + .Any({{UniS16, S32}, {{UniInVgprS16}, {Vgpr32}}}, !hasSALUFloat); + addRulesForGOpcs({G_IS_FPCLASS}) .Any({{DivS1, S16}, {{Vcc}, {Vgpr16}}}) .Any({{UniS1, S16}, {{UniInVcc}, {Vgpr16}}}) diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td index cde352313f86a..df3eed5460ae0 100644 --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -296,6 +296,10 @@ def FPTRUNC_ROUND_F32_F64_PSEUDO : VPseudoInstSI <(outs VGPR_32:$vdst), (ins VReg_64:$src0, i32imm:$round)>; } // End Uses = [MODE, EXEC] +let SubtargetPredicate = HasSALUFloatInsts, Uses = [MODE] in +def FPTRUNC_ROUND_F16_F32_SALU_PSEUDO : SPseudoInstSI <(outs SReg_32:$sdst), + (ins SReg_32:$src0, i32imm:$round)>; + let True16Predicate = NotHasTrue16BitInsts in def : GCNPat <(f16 (fptrunc_round f32:$src0, (i32 SupportedRoundMode:$round))), (FPTRUNC_ROUND_F16_F32_PSEUDO $src0, (as_hw_round_mode $round))>; @@ -308,6 +312,10 @@ let True16Predicate = UseRealTrue16Insts in def : GCNPat <(f16 (fptrunc_round (f32 (VOP3OpSelMods f32:$src0, i32:$src0_modifiers)), (i32 SupportedRoundMode:$round))), 
(FPTRUNC_ROUND_F16_F32_PSEUDO_t16_e64 $src0_modifiers, $src0, (as_hw_round_mode $round))>; +let SubtargetPredicate = HasSALUFloatInsts, AddedComplexity = 9 in +def : GCNPat <(f16 (UniformBinFrag<fptrunc_round> f32:$src0, (i32 SupportedRoundMode:$round))), + (FPTRUNC_ROUND_F16_F32_SALU_PSEUDO SReg_32:$src0, (as_hw_round_mode $round))>; + def : GCNPat <(f32 (fptrunc_round f64:$src0, (i32 SupportedRoundMode:$round))), (FPTRUNC_ROUND_F32_F64_PSEUDO $src0, (as_hw_round_mode $round))>; diff --git a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp index dc723986badc7..f9b33b4175c2f 100644 --- a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp +++ b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp @@ -174,7 +174,8 @@ Status SIModeRegister::getInstructionMode(MachineInstr &MI, Opcode == AMDGPU::FPTRUNC_ROUND_F16_F32_PSEUDO || Opcode == AMDGPU::FPTRUNC_ROUND_F16_F32_PSEUDO_fake16_e32 || Opcode == AMDGPU::FPTRUNC_ROUND_F16_F32_PSEUDO_t16_e64 || - Opcode == AMDGPU::FPTRUNC_ROUND_F32_F64_PSEUDO) { + Opcode == AMDGPU::FPTRUNC_ROUND_F32_F64_PSEUDO || + Opcode == AMDGPU::FPTRUNC_ROUND_F16_F32_SALU_PSEUDO) { switch (Opcode) { case AMDGPU::V_INTERP_P1LL_F16: case AMDGPU::V_INTERP_P1LV_F16: @@ -206,6 +207,12 @@ Status SIModeRegister::getInstructionMode(MachineInstr &MI, MI.setDesc(TII->get(AMDGPU::V_CVT_F32_F64_e32)); return Status(FP_ROUND_MODE_DP(3), FP_ROUND_MODE_DP(Mode)); } + case AMDGPU::FPTRUNC_ROUND_F16_F32_SALU_PSEUDO: { + unsigned Mode = MI.getOperand(2).getImm(); + MI.removeOperand(2); + MI.setDesc(TII->get(AMDGPU::S_CVT_F16_F32)); + return Status(FP_ROUND_MODE_DP(3), FP_ROUND_MODE_DP(Mode)); + } default: return DefaultStatus; } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.err.ll b/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.err.ll index d2caadff14b85..985e9927481ff 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.err.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.err.ll @@ -1,17 +1,17 @@ ; RUN: split-file %s %t ; RUN: not --crash llc 
-global-isel=0 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f16-f64-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F16-F64-FAIL %s -; RUN: not llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f16-f64-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F16-F64-FAIL %s +; RUN: not llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f16-f64-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F16-F64-FAIL %s ; TODO: check for GISEL when bfloat is supported. ; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/bf16-f32-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=BF16-F32-FAIL %s ; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/bf16-f64-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=BF16-F64-FAIL %s ; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f16-f32-tonearestaway-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F16-F32-TONEARESTAWAY-FAIL %s -; RUN: not llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f16-f32-tonearestaway-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F16-F32-TONEARESTAWAY-FAIL %s +; RUN: not llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f16-f32-tonearestaway-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F16-F32-TONEARESTAWAY-FAIL %s ; RUN: not --crash llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f32-f64-tonearestaway-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F32-F64-TONEARESTAWAY-FAIL %s -; RUN: not llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f32-f64-tonearestaway-err.ll 2>&1 | FileCheck --ignore-case --check-prefix=F32-F64-TONEARESTAWAY-FAIL %s +; RUN: not llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1030 -filetype=null %t/f32-f64-tonearestaway-err.ll 2>&1 | FileCheck --ignore-case 
--check-prefix=F32-F64-TONEARESTAWAY-FAIL %s ;--- f16-f64-err.ll define amdgpu_gs void @test_fptrunc_round_f16_f64(double %a, ptr addrspace(1) %out) { diff --git a/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll b/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll index 3d9ce6e79d9d2..3e45cd1f9a1cf 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll @@ -1,13 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1030 < %s | FileCheck -check-prefixes=CHECK,SDAG %s ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=CHECK,SDAG %s -; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1030 < %s | FileCheck -check-prefixes=CHECK,GISEL %s +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1030 < %s | FileCheck -check-prefixes=CHECK,GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s define amdgpu_gs half @v_fptrunc_round_f32_to_f16_tonearest(float %a) { ; CHECK-LABEL: v_fptrunc_round_f32_to_f16_tonearest: ; CHECK: ; %bb.0: ; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_tonearest: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-NEXT: ; return to shader part epilog %res = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.tonearest") ret half %res } @@ -18,6 +25,12 @@ define amdgpu_gs half @v_fptrunc_round_f32_to_f16_upward(float %a) { ; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 ; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_upward: +; GFX12: ; %bb.0: +; GFX12-NEXT: 
s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-NEXT: ; return to shader part epilog %res = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.upward") ret half %res } @@ -28,6 +41,12 @@ define amdgpu_gs half @v_fptrunc_round_f32_to_f16_downward(float %a) { ; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 ; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_downward: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-NEXT: ; return to shader part epilog %res = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.downward") ret half %res } @@ -38,6 +57,12 @@ define amdgpu_gs half @v_fptrunc_round_f32_to_f16_towardzero(float %a) { ; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 3 ; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_towardzero: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 3 +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-NEXT: ; return to shader part epilog %res = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.towardzero") ret half %res } @@ -55,6 +80,20 @@ define amdgpu_gs void @v_fptrunc_round_f32_to_f16_upward_multiple_calls(float %a ; CHECK-NEXT: v_add_f16_e32 v0, v1, v0 ; CHECK-NEXT: global_store_short v[2:3], v0, off ; CHECK-NEXT: s_endpgm +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_upward_multiple_calls: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-NEXT: v_cvt_f16_f32_e32 v4, v1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-NEXT: 
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_add_f16_e32 v0, v0, v4 +; GFX12-NEXT: v_add_f16_e32 v0, v1, v0 +; GFX12-NEXT: global_store_b16 v[2:3], v0, off +; GFX12-NEXT: s_endpgm %res1 = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.upward") %res2 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.upward") %res3 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.downward") @@ -77,6 +116,20 @@ define amdgpu_gs void @v_fptrunc_round_f32_to_f16_downward_multiple_calls(float ; CHECK-NEXT: v_add_f16_e32 v0, v1, v0 ; CHECK-NEXT: global_store_short v[2:3], v0, off ; CHECK-NEXT: s_endpgm +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_downward_multiple_calls: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: v_cvt_f16_f32_e32 v4, v0 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_add_f16_e32 v0, v4, v0 +; GFX12-NEXT: v_add_f16_e32 v0, v1, v0 +; GFX12-NEXT: global_store_b16 v[2:3], v0, off +; GFX12-NEXT: s_endpgm %res1 = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.upward") %res2 = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.downward") %res3 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.downward") @@ -99,6 +152,20 @@ define amdgpu_gs void @v_fptrunc_round_f32_to_f16_towardzero_multiple_calls(floa ; CHECK-NEXT: v_add_f16_e32 v0, v1, v0 ; CHECK-NEXT: global_store_short v[2:3], v0, off ; CHECK-NEXT: s_endpgm +; +; GFX12-LABEL: v_fptrunc_round_f32_to_f16_towardzero_multiple_calls: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 3 +; GFX12-NEXT: v_cvt_f16_f32_e32 v0, v0 +; 
GFX12-NEXT: v_cvt_f16_f32_e32 v4, v1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 1 +; GFX12-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_add_f16_e32 v0, v0, v4 +; GFX12-NEXT: v_add_f16_e32 v0, v1, v0 +; GFX12-NEXT: global_store_b16 v[2:3], v0, off +; GFX12-NEXT: s_endpgm %res1 = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.towardzero") %res2 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.towardzero") %res3 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.upward") @@ -109,14 +176,31 @@ define amdgpu_gs void @v_fptrunc_round_f32_to_f16_towardzero_multiple_calls(floa } define amdgpu_gs i32 @s_fptrunc_round_f32_to_f16_upward(float inreg %a, ptr addrspace(1) %out) { -; CHECK-LABEL: s_fptrunc_round_f32_to_f16_upward: -; CHECK: ; %bb.0: -; CHECK-NEXT: v_mov_b32_e32 v0, s0 -; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 -; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 -; CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; CHECK-NEXT: v_readfirstlane_b32 s0, v0 -; CHECK-NEXT: ; return to shader part epilog +; SDAG-LABEL: s_fptrunc_round_f32_to_f16_upward: +; SDAG: ; %bb.0: +; SDAG-NEXT: v_mov_b32_e32 v0, s0 +; SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 +; SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SDAG-NEXT: v_readfirstlane_b32 s0, v0 +; SDAG-NEXT: ; return to shader part epilog +; +; GISEL-LABEL: s_fptrunc_round_f32_to_f16_upward: +; GISEL: ; %bb.0: +; GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 +; GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GISEL-NEXT: v_readfirstlane_b32 s0, v0 +; GISEL-NEXT: s_and_b32 s0, 0xffff, s0 +; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_fptrunc_round_f32_to_f16_upward: +; GFX12: ; %bb.0: +; 
GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: s_and_b32 s0, 0xffff, s0 +; GFX12-NEXT: ; return to shader part epilog %res = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.upward") %bitcast = bitcast half %res to i16 %ret = zext i16 %bitcast to i32 @@ -124,14 +208,31 @@ define amdgpu_gs i32 @s_fptrunc_round_f32_to_f16_upward(float inreg %a, ptr addr } define amdgpu_gs i32 @s_fptrunc_round_f32_to_f16_downward(float inreg %a, ptr addrspace(1) %out) { -; CHECK-LABEL: s_fptrunc_round_f32_to_f16_downward: -; CHECK: ; %bb.0: -; CHECK-NEXT: v_mov_b32_e32 v0, s0 -; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 -; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 -; CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; CHECK-NEXT: v_readfirstlane_b32 s0, v0 -; CHECK-NEXT: ; return to shader part epilog +; SDAG-LABEL: s_fptrunc_round_f32_to_f16_downward: +; SDAG: ; %bb.0: +; SDAG-NEXT: v_mov_b32_e32 v0, s0 +; SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 +; SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SDAG-NEXT: v_readfirstlane_b32 s0, v0 +; SDAG-NEXT: ; return to shader part epilog +; +; GISEL-LABEL: s_fptrunc_round_f32_to_f16_downward: +; GISEL: ; %bb.0: +; GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 +; GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GISEL-NEXT: v_readfirstlane_b32 s0, v0 +; GISEL-NEXT: s_and_b32 s0, 0xffff, s0 +; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_fptrunc_round_f32_to_f16_downward: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) +; GFX12-NEXT: s_and_b32 s0, 0xffff, s0 +; GFX12-NEXT: ; return to shader part epilog %res = call half @llvm.fptrunc.round.f16.f32(float %a, metadata 
!"round.downward") %bitcast = bitcast half %res to i16 %ret = zext i16 %bitcast to i32 @@ -153,6 +254,21 @@ define amdgpu_gs void @s_fptrunc_round_f32_to_f16_upward_multiple_calls(float in ; CHECK-NEXT: v_add_f16_e32 v2, v3, v2 ; CHECK-NEXT: global_store_short v[0:1], v2, off ; CHECK-NEXT: s_endpgm +; +; GFX12-LABEL: s_fptrunc_round_f32_to_f16_upward_multiple_calls: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-NEXT: s_cvt_f16_f32 s2, s1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-NEXT: s_cvt_f16_f32 s1, s1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-NEXT: s_add_f16 s0, s0, s2 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3) +; GFX12-NEXT: s_add_f16 s0, s1, s0 +; GFX12-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-NEXT: global_store_b16 v[0:1], v2, off +; GFX12-NEXT: s_endpgm %res1 = call half @llvm.fptrunc.round.f16.f32(float %a, metadata !"round.upward") %res2 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.upward") %res3 = call half @llvm.fptrunc.round.f16.f32(float %b, metadata !"round.downward") @@ -178,6 +294,24 @@ define amdgpu_gs <2 x half> @v_fptrunc_round_v2f32_to_v2f16_upward(<2 x float> % ; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v2f32_to_v2f16_upward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v2f32_to_v2f16_upward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 
2, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %a, metadata !"round.upward") ret <2 x half> %res } @@ -198,6 +332,24 @@ define amdgpu_gs <2 x half> @v_fptrunc_round_v2f32_to_v2f16_downward(<2 x float> ; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v2f32_to_v2f16_downward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v2f32_to_v2f16_downward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %a, metadata !"round.downward") ret <2 x half> %res } @@ -240,6 +392,48 @@ define amdgpu_gs void @v_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls(<2 x ; GISEL-NEXT: v_pk_add_f16 v0, v1, v0 ; GISEL-NEXT: global_store_dword v[4:5], v0, off ; GISEL-NEXT: s_endpgm +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: 
v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v6, v2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v7, v3 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v3 +; GFX12-SDAG-NEXT: v_perm_b32 v3, v7, v6, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v2, v1, 0x5040100 +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_pk_add_f16 v0, v0, v3 +; GFX12-SDAG-NEXT: v_pk_add_f16 v0, v1, v0 +; GFX12-SDAG-NEXT: global_store_b32 v[4:5], v0, off +; GFX12-SDAG-NEXT: s_endpgm +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v6, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v7, v3 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v3 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v3, v6, v7 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v1, v1, v2 +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v0, v3 +; GFX12-GISEL-NEXT: v_pk_add_f16 v0, v1, v0 +; GFX12-GISEL-NEXT: global_store_b32 v[4:5], v0, off +; GFX12-GISEL-NEXT: s_endpgm %res1 = call <2 x half> 
@llvm.fptrunc.round.v2f16.v2f32(<2 x float> %a, metadata !"round.upward") %res2 = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %b, metadata !"round.upward") %res3 = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %b, metadata !"round.downward") @@ -250,18 +444,41 @@ define amdgpu_gs void @v_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls(<2 x } define amdgpu_gs <2 x i32> @s_fptrunc_round_v2f32_to_v2f16_upward(<2 x float> inreg %a, ptr addrspace(1) %out) { -; CHECK-LABEL: s_fptrunc_round_v2f32_to_v2f16_upward: -; CHECK: ; %bb.0: -; CHECK-NEXT: v_mov_b32_e32 v0, s0 -; CHECK-NEXT: v_mov_b32_e32 v1, s1 -; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 -; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 -; CHECK-NEXT: v_cvt_f16_f32_e32 v1, v1 -; CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; CHECK-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-NEXT: v_readfirstlane_b32 s0, v0 -; CHECK-NEXT: v_readfirstlane_b32 s1, v1 -; CHECK-NEXT: ; return to shader part epilog +; SDAG-LABEL: s_fptrunc_round_v2f32_to_v2f16_upward: +; SDAG: ; %bb.0: +; SDAG-NEXT: v_mov_b32_e32 v0, s0 +; SDAG-NEXT: v_mov_b32_e32 v1, s1 +; SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 +; SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SDAG-NEXT: v_readfirstlane_b32 s0, v0 +; SDAG-NEXT: v_readfirstlane_b32 s1, v1 +; SDAG-NEXT: ; return to shader part epilog +; +; GISEL-LABEL: s_fptrunc_round_v2f32_to_v2f16_upward: +; GISEL: ; %bb.0: +; GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 +; GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GISEL-NEXT: v_readfirstlane_b32 s0, v0 +; GISEL-NEXT: v_readfirstlane_b32 s1, v1 +; GISEL-NEXT: s_and_b32 s0, 0xffff, s0 +; GISEL-NEXT: s_and_b32 s1, 0xffff, s1 +; GISEL-NEXT: ; return to shader part epilog +; +; 
GFX12-LABEL: s_fptrunc_round_v2f32_to_v2f16_upward: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-NEXT: s_cvt_f16_f32 s1, s1 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_2) +; GFX12-NEXT: s_and_b32 s0, 0xffff, s0 +; GFX12-NEXT: s_and_b32 s1, 0xffff, s1 +; GFX12-NEXT: ; return to shader part epilog %res = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %a, metadata !"round.upward") %bitcast = bitcast <2 x half> %res to <2 x i16> %ret = zext <2 x i16> %bitcast to <2 x i32> @@ -269,18 +486,41 @@ define amdgpu_gs <2 x i32> @s_fptrunc_round_v2f32_to_v2f16_upward(<2 x float> in } define amdgpu_gs <2 x i32> @s_fptrunc_round_v2f32_to_v2f16_downward(<2 x float> inreg %a, ptr addrspace(1) %out) { -; CHECK-LABEL: s_fptrunc_round_v2f32_to_v2f16_downward: -; CHECK: ; %bb.0: -; CHECK-NEXT: v_mov_b32_e32 v0, s0 -; CHECK-NEXT: v_mov_b32_e32 v1, s1 -; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 -; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0 -; CHECK-NEXT: v_cvt_f16_f32_e32 v1, v1 -; CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; CHECK-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-NEXT: v_readfirstlane_b32 s0, v0 -; CHECK-NEXT: v_readfirstlane_b32 s1, v1 -; CHECK-NEXT: ; return to shader part epilog +; SDAG-LABEL: s_fptrunc_round_v2f32_to_v2f16_downward: +; SDAG: ; %bb.0: +; SDAG-NEXT: v_mov_b32_e32 v0, s0 +; SDAG-NEXT: v_mov_b32_e32 v1, s1 +; SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 +; SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SDAG-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SDAG-NEXT: v_readfirstlane_b32 s0, v0 +; SDAG-NEXT: v_readfirstlane_b32 s1, v1 +; SDAG-NEXT: ; return to shader part epilog +; +; GISEL-LABEL: s_fptrunc_round_v2f32_to_v2f16_downward: +; GISEL: ; %bb.0: +; GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GISEL-NEXT: v_mov_b32_e32 v1, s1 +; 
GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 +; GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GISEL-NEXT: v_readfirstlane_b32 s0, v0 +; GISEL-NEXT: v_readfirstlane_b32 s1, v1 +; GISEL-NEXT: s_and_b32 s0, 0xffff, s0 +; GISEL-NEXT: s_and_b32 s1, 0xffff, s1 +; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_fptrunc_round_v2f32_to_v2f16_downward: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-NEXT: s_cvt_f16_f32 s1, s1 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_2) +; GFX12-NEXT: s_and_b32 s0, 0xffff, s0 +; GFX12-NEXT: s_and_b32 s1, 0xffff, s1 +; GFX12-NEXT: ; return to shader part epilog %res = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %a, metadata !"round.downward") %bitcast = bitcast <2 x half> %res to <2 x i16> %ret = zext <2 x i16> %bitcast to <2 x i32> @@ -325,17 +565,66 @@ define amdgpu_gs void @s_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls(<2 x ; GISEL-NEXT: v_cvt_f16_f32_e32 v3, v3 ; GISEL-NEXT: v_cvt_f16_f32_e32 v6, v4 ; GISEL-NEXT: v_cvt_f16_f32_e32 v7, v5 -; GISEL-NEXT: v_pack_b32_f16 v2, v2, v3 +; GISEL-NEXT: v_readfirstlane_b32 s0, v2 +; GISEL-NEXT: v_readfirstlane_b32 s1, v3 +; GISEL-NEXT: v_readfirstlane_b32 s2, v6 +; GISEL-NEXT: v_readfirstlane_b32 s3, v7 ; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 2 -; GISEL-NEXT: v_cvt_f16_f32_e32 v3, v4 -; GISEL-NEXT: v_cvt_f16_f32_e32 v4, v5 -; GISEL-NEXT: v_pack_b32_f16 v5, v6, v7 -; GISEL-NEXT: v_pack_b32_f16 v3, v3, v4 +; GISEL-NEXT: v_cvt_f16_f32_e32 v2, v4 +; GISEL-NEXT: v_cvt_f16_f32_e32 v3, v5 +; GISEL-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GISEL-NEXT: s_pack_ll_b32_b16 s1, s2, s3 +; GISEL-NEXT: v_readfirstlane_b32 s2, v2 +; GISEL-NEXT: v_readfirstlane_b32 s3, v3 ; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 0 -; GISEL-NEXT: v_pk_add_f16 v2, v2, v5 -; GISEL-NEXT: 
v_pk_add_f16 v2, v3, v2 +; GISEL-NEXT: v_pk_add_f16 v2, s0, s1 +; GISEL-NEXT: s_pack_ll_b32_b16 s0, s2, s3 +; GISEL-NEXT: v_pk_add_f16 v2, s0, v2 ; GISEL-NEXT: global_store_dword v[0:1], v2, off ; GISEL-NEXT: s_endpgm +; +; GFX12-SDAG-LABEL: s_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-SDAG-NEXT: s_cvt_f16_f32 s1, s1 +; GFX12-SDAG-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-SDAG-NEXT: s_cvt_f16_f32 s4, s3 +; GFX12-SDAG-NEXT: s_cvt_f16_f32 s5, s2 +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-SDAG-NEXT: s_cvt_f16_f32 s3, s3 +; GFX12-SDAG-NEXT: s_cvt_f16_f32 s2, s2 +; GFX12-SDAG-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GFX12-SDAG-NEXT: s_pack_ll_b32_b16 s1, s5, s4 +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-SDAG-NEXT: v_pk_add_f16 v2, s0, s1 +; GFX12-SDAG-NEXT: s_pack_ll_b32_b16 s0, s2, s3 +; GFX12-SDAG-NEXT: s_wait_alu depctr_sa_sdst(0) +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_pk_add_f16 v2, s0, v2 +; GFX12-SDAG-NEXT: global_store_b32 v[0:1], v2, off +; GFX12-SDAG-NEXT: s_endpgm +; +; GFX12-GISEL-LABEL: s_fptrunc_round_v2f32_to_v2f16_upward_multiple_calls: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-GISEL-NEXT: s_cvt_f16_f32 s0, s0 +; GFX12-GISEL-NEXT: s_cvt_f16_f32 s1, s1 +; GFX12-GISEL-NEXT: s_cvt_f16_f32 s4, s2 +; GFX12-GISEL-NEXT: s_cvt_f16_f32 s5, s3 +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 2 +; GFX12-GISEL-NEXT: s_cvt_f16_f32 s2, s2 +; GFX12-GISEL-NEXT: s_cvt_f16_f32 s3, s3 +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 0 +; GFX12-GISEL-NEXT: s_add_f16 s0, s0, s4 +; GFX12-GISEL-NEXT: s_add_f16 s1, s1, s5 +; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_2) +; GFX12-GISEL-NEXT: s_add_f16 s0, s2, s0 +; 
GFX12-GISEL-NEXT: s_add_f16 s1, s3, s1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GFX12-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-GISEL-NEXT: global_store_b32 v[0:1], v2, off +; GFX12-GISEL-NEXT: s_endpgm %res1 = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %a, metadata !"round.upward") %res2 = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %b, metadata !"round.upward") %res3 = call <2 x half> @llvm.fptrunc.round.v2f16.v2f32(<2 x float> %b, metadata !"round.downward") @@ -358,11 +647,36 @@ define amdgpu_gs <3 x half> @v_fptrunc_round_v3f32_to_v3f16_upward(<3 x float> % ; GISEL-LABEL: v_fptrunc_round_v3f32_to_v3f16_upward: ; GISEL: ; %bb.0: ; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 +; GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 ; GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 ; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 -; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v2 +; GISEL-NEXT: v_lshl_or_b32 v1, s0, 16, v2 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v3f32_to_v3f16_upward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v3f32_to_v3f16_upward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_lshl_or_b32 v1, s0, 16, v2 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <3 x half> @llvm.fptrunc.round.v3f16.v3f32(<3 x float> %a, metadata !"round.upward") ret <3 x half> %res } @@ -380,11 +694,36 @@ define amdgpu_gs <3 x half> @v_fptrunc_round_v3f32_to_v3f16_downward(<3 x float> ; GISEL-LABEL: v_fptrunc_round_v3f32_to_v3f16_downward: ; GISEL: ; %bb.0: ; GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 +; GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 ; GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 ; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 -; GISEL-NEXT: v_cvt_f16_f32_e32 v1, v2 +; GISEL-NEXT: v_lshl_or_b32 v1, s0, 16, v2 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v3f32_to_v3f16_downward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v3f32_to_v3f16_downward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_lshl_or_b32 v1, s0, 
16, v2 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <3 x half> @llvm.fptrunc.round.v3f16.v3f32(<3 x float> %a, metadata !"round.downward") ret <3 x half> %res } @@ -411,6 +750,30 @@ define amdgpu_gs <4 x half> @v_fptrunc_round_v4f32_to_v4f16_upward(<4 x float> % ; GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 ; GISEL-NEXT: v_pack_b32_f16 v1, v2, v3 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v4f32_to_v4f16_upward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v4f32_to_v4f16_upward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v1, v2, v3 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <4 x half> @llvm.fptrunc.round.v4f16.v4f32(<4 x float> %a, metadata !"round.upward") ret <4 x half> %res } @@ -437,6 +800,30 @@ define amdgpu_gs <4 x half> @v_fptrunc_round_v4f32_to_v4f16_downward(<4 x float> ; GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 ; GISEL-NEXT: v_pack_b32_f16 v1, v2, v3 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v4f32_to_v4f16_downward: +; 
GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v4f32_to_v4f16_downward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v1, v2, v3 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <4 x half> @llvm.fptrunc.round.v4f16.v4f32(<4 x float> %a, metadata !"round.downward") ret <4 x half> %res } @@ -475,6 +862,43 @@ define amdgpu_gs <8 x half> @v_fptrunc_round_v8f32_to_v8f16_upward(<8 x float> % ; GISEL-NEXT: v_pack_b32_f16 v2, v4, v5 ; GISEL-NEXT: v_pack_b32_f16 v3, v6, v7 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v8f32_to_v8f16_upward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v6, v6 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v5, v5 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v7, v7 +; GFX12-SDAG-NEXT: 
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_perm_b32 v2, v5, v4, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v3, v7, v6, 0x5040100 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v8f32_to_v8f16_upward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v5, v5 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v6, v6 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v7, v7 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v1, v2, v3 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v2, v4, v5 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v3, v6, v7 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <8 x half> @llvm.fptrunc.round.v8f16.v8f32(<8 x float> %a, metadata !"round.upward") ret <8 x half> %res } @@ -513,6 +937,43 @@ define amdgpu_gs <8 x half> @v_fptrunc_round_v8f32_to_v8f16_downward(<8 x float> ; GISEL-NEXT: v_pack_b32_f16 v2, v4, v5 ; GISEL-NEXT: v_pack_b32_f16 v3, v6, v7 ; GISEL-NEXT: ; return to shader part epilog +; +; GFX12-SDAG-LABEL: v_fptrunc_round_v8f32_to_v8f16_downward: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v6, v6 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-SDAG-NEXT: 
v_cvt_f16_f32_e32 v3, v3 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v5, v5 +; GFX12-SDAG-NEXT: v_cvt_f16_f32_e32 v7, v7 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v1, v3, v2, 0x5040100 +; GFX12-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-SDAG-NEXT: v_perm_b32 v2, v5, v4, 0x5040100 +; GFX12-SDAG-NEXT: v_perm_b32 v3, v7, v6, 0x5040100 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: v_fptrunc_round_v8f32_to_v8f16_downward: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v1, v1 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v2, v2 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v3, v3 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v5, v5 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v6, v6 +; GFX12-GISEL-NEXT: v_cvt_f16_f32_e32 v7, v7 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v0, v0, v1 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v1, v2, v3 +; GFX12-GISEL-NEXT: v_pack_b32_f16 v2, v4, v5 +; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX12-GISEL-NEXT: v_pack_b32_f16 v3, v6, v7 +; GFX12-GISEL-NEXT: ; return to shader part epilog %res = call <8 x half> @llvm.fptrunc.round.v8f16.v8f32(<8 x float> %a, metadata !"round.downward") ret <8 x half> %res } @@ -522,6 +983,11 @@ define amdgpu_gs float @v_fptrunc_round_f64_to_f32_tonearest(double %a) { ; CHECK: ; %bb.0: ; CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f64_to_f32_tonearest: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; GFX12-NEXT: ; return to shader part epilog %res = call float @llvm.fptrunc.round.f32.f64(double %a, metadata !"round.tonearest") ret float %res } @@ -532,6 +998,12 @@ 
define amdgpu_gs float @v_fptrunc_round_f64_to_f32_upward(double %a) { ; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 ; CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f64_to_f32_upward: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; GFX12-NEXT: ; return to shader part epilog %res = call float @llvm.fptrunc.round.f32.f64(double %a, metadata !"round.upward") ret float %res } @@ -542,6 +1014,12 @@ define amdgpu_gs float @v_fptrunc_round_f64_to_f32_downward(double %a) { ; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 ; CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f64_to_f32_downward: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; GFX12-NEXT: ; return to shader part epilog %res = call float @llvm.fptrunc.round.f32.f64(double %a, metadata !"round.downward") ret float %res } @@ -552,6 +1030,52 @@ define amdgpu_gs float @v_fptrunc_round_f64_to_f32_towardzero(double %a) { ; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 3 ; CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] ; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: v_fptrunc_round_f64_to_f32_towardzero: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 2), 3 +; GFX12-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; GFX12-NEXT: ; return to shader part epilog %res = call float @llvm.fptrunc.round.f32.f64(double %a, metadata !"round.towardzero") ret float %res } + +define amdgpu_gs float @s_fptrunc_round_f64_to_f32_upward(double inreg %a) { +; CHECK-LABEL: s_fptrunc_round_f64_to_f32_upward: +; CHECK: ; %bb.0: +; CHECK-NEXT: v_mov_b32_e32 v0, s0 +; CHECK-NEXT: v_mov_b32_e32 v1, s1 +; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1 +; 
CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_fptrunc_round_f64_to_f32_upward: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 2, 1), 1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; GFX12-NEXT: ; return to shader part epilog + %res = call float @llvm.fptrunc.round.f32.f64(double %a, metadata !"round.upward") + ret float %res +} + +define amdgpu_gs float @s_fptrunc_round_f64_to_f32_downward(double inreg %a) { +; CHECK-LABEL: s_fptrunc_round_f64_to_f32_downward: +; CHECK: ; %bb.0: +; CHECK-NEXT: v_mov_b32_e32 v0, s0 +; CHECK-NEXT: v_mov_b32_e32 v1, s1 +; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1 +; CHECK-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; CHECK-NEXT: ; return to shader part epilog +; +; GFX12-LABEL: s_fptrunc_round_f64_to_f32_downward: +; GFX12: ; %bb.0: +; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX12-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 3, 1), 1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_cvt_f32_f64_e32 v0, v[0:1] +; GFX12-NEXT: ; return to shader part epilog + %res = call float @llvm.fptrunc.round.f32.f64(double %a, metadata !"round.downward") + ret float %res +}