diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index 0b85a4c815f8e..f1a109a6f3497 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -4218,6 +4218,9 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, case G_SSUBO: case G_SADDE: case G_SSUBE: + case G_STRICT_FADD: + case G_STRICT_FMUL: + case G_STRICT_FMA: return fewerElementsVectorMultiEltType(GMI, NumElts); case G_ICMP: case G_FCMP: @@ -4833,6 +4836,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx, return Legalized; } case TargetOpcode::G_FMA: + case TargetOpcode::G_STRICT_FMA: case TargetOpcode::G_FSHR: case TargetOpcode::G_FSHL: { Observer.changingInstr(MI); diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp index 14b47215ce37c..e4cc2a6b73af6 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -702,7 +702,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_, getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({CodePtr}); auto &FPOpActions = getActionDefinitionsBuilder( - { G_FADD, G_FMUL, G_FMA, G_FCANONICALIZE}) + { G_FADD, G_FMUL, G_FMA, G_FCANONICALIZE, + G_STRICT_FADD, G_STRICT_FMUL, G_STRICT_FMA}) .legalFor({S32, S64}); auto &TrigActions = getActionDefinitionsBuilder({G_FSIN, G_FCOS}) .customFor({S32, S64}); diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp index 5d13321e9eb08..f1e80454b6271 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -3710,6 +3710,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { case AMDGPU::G_FMAXNUM_IEEE: case AMDGPU::G_FCANONICALIZE: case AMDGPU::G_INTRINSIC_TRUNC: + case AMDGPU::G_STRICT_FADD: + case 
AMDGPU::G_STRICT_FMUL: + case AMDGPU::G_STRICT_FMA: case AMDGPU::G_BSWAP: // TODO: Somehow expand for scalar? case AMDGPU::G_FSHR: // TODO: Expand for scalar case AMDGPU::G_AMDGPU_FMIN_LEGACY: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f16.ll new file mode 100644 index 0000000000000..5c44d049d65e6 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f16.ll @@ -0,0 +1,164 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s + +define half @v_constained_fma_f16_fpexcept_strict(half %x, half %y, half %z) #0 { +; GCN-LABEL: v_constained_fma_f16_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f16 v0, v0, v1, v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret half %val +} + +define <2 x half> @v_constained_fma_v2f16_fpexcept_strict(<2 x half> %x, <2 x half> %y, <2 x half> %z) #0 { +; GFX9-LABEL: v_constained_fma_v2f16_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_fma_f16 v0, v0, v1, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_constained_fma_v2f16_fpexcept_strict: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX8-NEXT: v_fma_f16 v0, v0, v1, v2 +; GFX8-NEXT: v_fma_f16 v1, v3, v4, v5 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:WORD_0 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-NEXT: s_setpc_b64 s[30:31] + %val = call <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half> %x, <2 x half> %y, <2 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x half> %val +} + +define <3 x half> @v_constained_fma_v3f16_fpexcept_strict(<3 x half> %x, <3 x half> %y, <3 x half> %z) #0 { +; GFX9-LABEL: v_constained_fma_v3f16_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_fma_f16 v0, v0, v2, v4 +; GFX9-NEXT: v_pk_fma_f16 v1, v1, v3, v5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_constained_fma_v3f16_fpexcept_strict: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX8-NEXT: v_fma_f16 v0, v0, v2, v4 +; GFX8-NEXT: v_fma_f16 v2, v6, v7, v8 +; GFX8-NEXT: v_fma_f16 v1, v1, v3, v5 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-NEXT: v_bfe_u32 v1, v1, 0, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] + %val = call <3 x half> @llvm.experimental.constrained.fma.v3f16(<3 x half> %x, <3 x half> %y, <3 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <3 x half> %val +} + +define <4 x half> @v_constained_fma_v4f16_fpexcept_strict(<4 x half> %x, <4 x half> %y, <4 x half> %z) #0 { +; GFX9-LABEL: v_constained_fma_v4f16_fpexcept_strict: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_fma_f16 v0, v0, v2, v4 +; GFX9-NEXT: v_pk_fma_f16 v1, v1, v3, v5 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: 
v_constained_fma_v4f16_fpexcept_strict: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v5 +; GFX8-NEXT: v_fma_f16 v0, v0, v2, v4 +; GFX8-NEXT: v_fma_f16 v2, v6, v8, v10 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 +; GFX8-NEXT: v_fma_f16 v1, v1, v3, v5 +; GFX8-NEXT: v_fma_f16 v3, v7, v9, v11 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-NEXT: s_setpc_b64 s[30:31] + %val = call <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half> %x, <4 x half> %y, <4 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <4 x half> %val +} + +define half @v_constained_fma_f16_fpexcept_strict_fneg(half %x, half %y, half %z) #0 { +; GCN-LABEL: v_constained_fma_f16_fpexcept_strict_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f16 v0, v0, v1, -v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.z = fneg half %z + %val = call half @llvm.experimental.constrained.fma.f16(half %x, half %y, half %neg.z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret half %val +} + +define half @v_constained_fma_f16_fpexcept_strict_fneg_fneg(half %x, half %y, half %z) #0 { +; GCN-LABEL: v_constained_fma_f16_fpexcept_strict_fneg_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f16 v0, -v0, -v1, v2 +; GCN-NEXT: s_setpc_b64 
s[30:31] + %neg.x = fneg half %x + %neg.y = fneg half %y + %val = call half @llvm.experimental.constrained.fma.f16(half %neg.x, half %neg.y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret half %val +} + +define half @v_constained_fma_f16_fpexcept_strict_fabs_fabs(half %x, half %y, half %z) #0 { +; GCN-LABEL: v_constained_fma_f16_fpexcept_strict_fabs_fabs: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f16 v0, |v0|, |v1|, v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = call half @llvm.fabs.f16(half %x) + %neg.y = call half @llvm.fabs.f16(half %y) + %val = call half @llvm.experimental.constrained.fma.f16(half %neg.x, half %neg.y, half %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret half %val +} + +define <2 x half> @v_constained_fma_v2f16_fpexcept_strict_fneg_fneg(<2 x half> %x, <2 x half> %y, <2 x half> %z) #0 { +; GFX9-LABEL: v_constained_fma_v2f16_fpexcept_strict_fneg_fneg: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_fma_f16 v0, v0, v1, v2 neg_lo:[1,1,0] neg_hi:[1,1,0] +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_constained_fma_v2f16_fpexcept_strict_fneg_fneg: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_xor_b32_e32 v0, 0x80008000, v0 +; GFX8-NEXT: v_xor_b32_e32 v1, 0x80008000, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v2 +; GFX8-NEXT: v_fma_f16 v0, v0, v1, v2 +; GFX8-NEXT: v_fma_f16 v1, v3, v4, v5 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX8-NEXT: s_setpc_b64 s[30:31] + %neg.x = fneg <2 x half> %x + %neg.y = fneg <2 x half> %y + %val = call <2 x half> 
@llvm.experimental.constrained.fma.v2f16(<2 x half> %neg.x, <2 x half> %neg.y, <2 x half> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x half> %val +} + +declare half @llvm.fabs.f16(half) #1 +declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) #1 +declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata) #1 +declare <3 x half> @llvm.experimental.constrained.fma.v3f16(<3 x half>, <3 x half>, <3 x half>, metadata, metadata) #1 +declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata) #1 + +attributes #0 = { strictfp } +attributes #1 = { inaccessiblememonly nounwind willreturn } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f32.ll new file mode 100644 index 0000000000000..5c36ef18724ad --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f32.ll @@ -0,0 +1,105 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s + +define float @v_constained_fma_f32_fpexcept_strict(float %x, float %y, float %z) #0 { +; GCN-LABEL: v_constained_fma_f32_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, v0, v1, v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret float %val +} + +define <2 x float> @v_constained_fma_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y, <2 x float> %z) #0 { +; GCN-LABEL: v_constained_fma_v2f32_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, v0, v2, v4 +; GCN-NEXT: v_fma_f32 v1, v1, v3, v5 +; GCN-NEXT: 
s_setpc_b64 s[30:31] + %val = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> %x, <2 x float> %y, <2 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x float> %val +} + +define <3 x float> @v_constained_fma_v3f32_fpexcept_strict(<3 x float> %x, <3 x float> %y, <3 x float> %z) #0 { +; GCN-LABEL: v_constained_fma_v3f32_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, v0, v3, v6 +; GCN-NEXT: v_fma_f32 v1, v1, v4, v7 +; GCN-NEXT: v_fma_f32 v2, v2, v5, v8 +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call <3 x float> @llvm.experimental.constrained.fma.v3f32(<3 x float> %x, <3 x float> %y, <3 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <3 x float> %val +} + +define <4 x float> @v_constained_fma_v4f32_fpexcept_strict(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 { +; GCN-LABEL: v_constained_fma_v4f32_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, v0, v4, v8 +; GCN-NEXT: v_fma_f32 v1, v1, v5, v9 +; GCN-NEXT: v_fma_f32 v2, v2, v6, v10 +; GCN-NEXT: v_fma_f32 v3, v3, v7, v11 +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <4 x float> %val +} + +define float @v_constained_fma_f32_fpexcept_strict_fneg(float %x, float %y, float %z) #0 { +; GCN-LABEL: v_constained_fma_f32_fpexcept_strict_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, v0, v1, -v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.z = fneg float %z + %val = call float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %neg.z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret float %val +} + +define float @v_constained_fma_f32_fpexcept_strict_fneg_fneg(float %x, 
float %y, float %z) #0 { +; GCN-LABEL: v_constained_fma_f32_fpexcept_strict_fneg_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, -v0, -v1, v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = fneg float %x + %neg.y = fneg float %y + %val = call float @llvm.experimental.constrained.fma.f32(float %neg.x, float %neg.y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret float %val +} + +define float @v_constained_fma_f32_fpexcept_strict_fabs_fabs(float %x, float %y, float %z) #0 { +; GCN-LABEL: v_constained_fma_f32_fpexcept_strict_fabs_fabs: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, |v0|, |v1|, v2 +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = call float @llvm.fabs.f32(float %x) + %neg.y = call float @llvm.fabs.f32(float %y) + %val = call float @llvm.experimental.constrained.fma.f32(float %neg.x, float %neg.y, float %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret float %val +} + +define <2 x float> @v_constained_fma_v2f32_fpexcept_strict_fneg_fneg(<2 x float> %x, <2 x float> %y, <2 x float> %z) #0 { +; GCN-LABEL: v_constained_fma_v2f32_fpexcept_strict_fneg_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f32 v0, -v0, -v2, v4 +; GCN-NEXT: v_fma_f32 v1, -v1, -v3, v5 +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = fneg <2 x float> %x + %neg.y = fneg <2 x float> %y + %val = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> %neg.x, <2 x float> %neg.y, <2 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x float> %val +} + +declare float @llvm.fabs.f32(float) #1 +declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) #1 +declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata) #1 +declare <3 x float> 
@llvm.experimental.constrained.fma.v3f32(<3 x float>, <3 x float>, <3 x float>, metadata, metadata) #1 +declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata) #1 + +attributes #0 = { strictfp } +attributes #1 = { inaccessiblememonly nounwind willreturn } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f64.ll new file mode 100644 index 0000000000000..0c11e8fe465f9 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/strict_fma.f64.ll @@ -0,0 +1,105 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s + +define double @v_constained_fma_f64_fpexcept_strict(double %x, double %y, double %z) #0 { +; GCN-LABEL: v_constained_fma_f64_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret double %val +} + +define <2 x double> @v_constained_fma_v2f64_fpexcept_strict(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 { +; GCN-LABEL: v_constained_fma_v2f64_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], v[0:1], v[4:5], v[8:9] +; GCN-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[10:11] +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x double> %val +} + +define <3 x double> @v_constained_fma_v3f64_fpexcept_strict(<3 x double> %x, <3 x double> %y, <3 x double> %z) #0 { +; GCN-LABEL: 
v_constained_fma_v3f64_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], v[0:1], v[6:7], v[12:13] +; GCN-NEXT: v_fma_f64 v[2:3], v[2:3], v[8:9], v[14:15] +; GCN-NEXT: v_fma_f64 v[4:5], v[4:5], v[10:11], v[16:17] +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double> %x, <3 x double> %y, <3 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <3 x double> %val +} + +define <4 x double> @v_constained_fma_v4f64_fpexcept_strict(<4 x double> %x, <4 x double> %y, <4 x double> %z) #0 { +; GCN-LABEL: v_constained_fma_v4f64_fpexcept_strict: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], v[0:1], v[8:9], v[16:17] +; GCN-NEXT: v_fma_f64 v[2:3], v[2:3], v[10:11], v[18:19] +; GCN-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[20:21] +; GCN-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[22:23] +; GCN-NEXT: s_setpc_b64 s[30:31] + %val = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <4 x double> %val +} + +define double @v_constained_fma_f64_fpexcept_strict_fneg(double %x, double %y, double %z) #0 { +; GCN-LABEL: v_constained_fma_f64_fpexcept_strict_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5] +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.z = fneg double %z + %val = call double @llvm.experimental.constrained.fma.f64(double %x, double %y, double %neg.z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret double %val +} + +define double @v_constained_fma_f64_fpexcept_strict_fneg_fneg(double %x, double %y, double %z) #0 { +; GCN-LABEL: v_constained_fma_f64_fpexcept_strict_fneg_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: 
v_fma_f64 v[0:1], -v[0:1], -v[2:3], v[4:5] +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = fneg double %x + %neg.y = fneg double %y + %val = call double @llvm.experimental.constrained.fma.f64(double %neg.x, double %neg.y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret double %val +} + +define double @v_constained_fma_f64_fpexcept_strict_fabs_fabs(double %x, double %y, double %z) #0 { +; GCN-LABEL: v_constained_fma_f64_fpexcept_strict_fabs_fabs: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], |v[0:1]|, |v[2:3]|, v[4:5] +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = call double @llvm.fabs.f64(double %x) + %neg.y = call double @llvm.fabs.f64(double %y) + %val = call double @llvm.experimental.constrained.fma.f64(double %neg.x, double %neg.y, double %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret double %val +} + +define <2 x double> @v_constained_fma_v2f64_fpexcept_strict_fneg_fneg(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 { +; GCN-LABEL: v_constained_fma_v2f64_fpexcept_strict_fneg_fneg: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_fma_f64 v[0:1], -v[0:1], -v[4:5], v[8:9] +; GCN-NEXT: v_fma_f64 v[2:3], -v[2:3], -v[6:7], v[10:11] +; GCN-NEXT: s_setpc_b64 s[30:31] + %neg.x = fneg <2 x double> %x + %neg.y = fneg <2 x double> %y + %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %neg.x, <2 x double> %neg.y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") + ret <2 x double> %val +} + +declare double @llvm.fabs.f64(double) #1 +declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) #1 +declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata) #1 +declare <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double>, <3 x double>, <3 x double>, metadata, metadata) 
#1 +declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata) #1 + +attributes #0 = { strictfp } +attributes #1 = { inaccessiblememonly nounwind willreturn }