diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 2fd73275721b1..25ecc235d6dc4 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -357,6 +357,11 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
   setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
 
+  setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
+  setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
+  setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
+  setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
+
   if (Subtarget.hasMips32r2() ||
       getTargetMachine().getTargetTriple().isOSLinux())
     setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
@@ -661,7 +666,8 @@ static bool invertFPCondCodeUser(Mips::CondCode CC) {
 // Returns Op if setcc is not a floating point comparison.
 static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
   // must be a SETCC node
-  if (Op.getOpcode() != ISD::SETCC)
+  if (Op.getOpcode() != ISD::SETCC && Op.getOpcode() != ISD::STRICT_FSETCC &&
+      Op.getOpcode() != ISD::STRICT_FSETCCS)
     return Op;
 
   SDValue LHS = Op.getOperand(0);
@@ -1338,6 +1344,9 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
   case ISD::JumpTable: return lowerJumpTable(Op, DAG);
   case ISD::SELECT: return lowerSELECT(Op, DAG);
   case ISD::SETCC: return lowerSETCC(Op, DAG);
+  case ISD::STRICT_FSETCC:
+  case ISD::STRICT_FSETCCS:
+    return lowerFSETCC(Op, DAG);
   case ISD::VASTART: return lowerVASTART(Op, DAG);
   case ISD::VAARG: return lowerVAARG(Op, DAG);
   case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
@@ -2227,6 +2236,24 @@ SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
   return createCMovFP(DAG, Cond, True, False, DL);
 }
 
+SDValue MipsTargetLowering::lowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
+  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
+
+  SDLoc DL(Op);
+  SDValue Chain = Op.getOperand(0);
+  SDValue LHS = Op.getOperand(1);
+  SDValue RHS = Op.getOperand(2);
+  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
+
+  SDValue Cond = DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
+                             DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
+  SDValue True = DAG.getConstant(1, DL, MVT::i32);
+  SDValue False = DAG.getConstant(0, DL, MVT::i32);
+  SDValue CMovFP = createCMovFP(DAG, Cond, True, False, DL);
+
+  return DAG.getMergeValues({CMovFP, Chain}, DL);
+}
+
 SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
   EVT Ty = Op.getValueType();
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 25a0bf9b797d5..68f62bb3d375e 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -572,6 +572,7 @@ class TargetRegisterClass;
     SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+    SDValue lowerFSETCC(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerVAARG(SDValue Op, SelectionDAG &DAG) const;
     SDValue lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/Mips/fp-strict-fcmp.ll b/llvm/test/CodeGen/Mips/fp-strict-fcmp.ll
new file mode 100644
index 0000000000000..5decd9c424f70
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/fp-strict-fcmp.ll
@@ -0,0 +1,586 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=mips -mcpu=mips32r2 < %s | FileCheck %s
+
+define i32 @fcmp_olt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_olt_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ole_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ole_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ogt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ogt_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_oge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_oge_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_oeq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_oeq_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_one_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_one_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ult_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ult_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ule_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ule_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ugt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ugt_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_uge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_uge_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ueq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_ueq_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_une_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmp_une_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_olt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_olt_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ole_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ole_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ogt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ogt_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_oge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_oge_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_oeq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_oeq_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_one_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_one_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ult_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ult_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ule_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ule_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ugt_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ugt_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_uge_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_uge_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ueq_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_ueq_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_une_f32(float %a, float %b) #0 {
+; CHECK-LABEL: fcmps_une_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.s $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_olt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_olt_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ole_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ole_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ogt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ogt_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_oge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_oge_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_oeq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_oeq_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_one_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_one_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ult_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ult_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ule_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ule_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ugt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ugt_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_uge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_uge_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_ueq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_ueq_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmp_une_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmp_une_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_olt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_olt_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ole_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ole_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ogt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ogt_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_oge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_oge_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_oeq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_oeq_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_one_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_one_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ult_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ult_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ult.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ule_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ule_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ule.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ugt_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ugt_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ole.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_uge_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_uge_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.olt.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_ueq_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_ueq_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.ueq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movf $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @fcmps_une_f64(double %a, double %b) #0 {
+; CHECK-LABEL: fcmps_une_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addiu $2, $zero, 1
+; CHECK-NEXT:    c.eq.d $f12, $f14
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    movt $2, $zero, $fcc0
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+
+attributes #0 = { strictfp }
+
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)