diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index c6f3a492683e4..3a089e0762f1c 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -349,4 +349,12 @@ let TargetPrefix = "riscv" in {
                 [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 2;
   }
+
+  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
+                                     [llvm_anyfloat_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
+                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
+                                      llvm_anyint_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 3c587636144fd..677d9f392bb34 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -980,8 +980,9 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 in {
 // Floating-Point Scalar Move Instructions
 def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                        (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
-def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd),
-                        (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
+let Constraints = "$vd = $vd_wb" in
+def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
+                        (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
 } // Predicates = [HasStdExtV, HasStdExtF]
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4927b82ec6615..92340785d861a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1370,12 +1370,37 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
       Constraints = "$rd = $rs1" in
     def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                            (ins m.vrclass:$rs1, GPR:$rs2,
-                                                GPR:$vl, ixlenimm:$sew),
+                                                 GPR:$vl, ixlenimm:$sew),
                                            []>, RISCVVPseudo;
     }
   }
 }
+} // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// 17.2. Floating-Point Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
+    Uses = [VL, VTYPE] in {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      let SEWIndex = 2, BaseInstr = VFMV_F_S in
+      def PseudoVFMV_F_S # "_" # m.MX : Pseudo<(outs FPR32:$rd),
+                                               (ins m.vrclass:$rs2,
+                                                    ixlenimm:$sew),
+                                               []>, RISCVVPseudo;
+      let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F,
+          Constraints = "$rd = $rs1" in
+      def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd),
+                                               (ins m.vrclass:$rs1, FPR32:$rs2,
+                                                    GPR:$vl, ixlenimm:$sew),
+                                               []>, RISCVVPseudo;
+    }
+  }
 }
+} // Predicates = [HasStdExtV, HasStdExtF]
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -1557,3 +1582,34 @@ foreach vti = AllIntegerVectors in {
                (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// 17.2. Floating-Point Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+foreach fvti = AllFloatVectors in {
+  defvar instr = !cast<Instruction>("PseudoVFMV_F_S_" # fvti.LMul.MX);
+  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
+            // Floating point instructions with a scalar result will always
+            // generate the result in a register of class FPR32. When dealing
+            // with the f64 variant of a pattern we need to promote the FPR32
+            // subregister generated by the instruction to the FPR64 base
+            // register expected by the type in the pattern
+            !cond(!eq(!cast<string>(fvti.ScalarRegClass),
+                      !cast<string>(FPR64)):
+                    (SUBREG_TO_REG (i32 -1),
+                                   (instr $rs2, fvti.SEW), sub_32),
+                  !eq(!cast<string>(fvti.ScalarRegClass),
+                      !cast<string>(FPR16)):
+                    (EXTRACT_SUBREG (instr $rs2, fvti.SEW), sub_16),
+                  !eq(1, 1):
+                    (instr $rs2, fvti.SEW))>;
+
+  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
+                                              (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVFMV_S_F_" # fvti.LMul.MX)
+                (fvti.Vector $rs1), ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+                (NoX0 GPR:$vl), fvti.SEW)>;
+}
+} // Predicates = [HasStdExtV, HasStdExtF]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
new file mode 100644
index 0000000000000..18ca597f06bee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-v,+experimental-zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-v,+experimental-zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv2f16(<vscale x 2 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv4f16(<vscale x 4 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv8f16(<vscale x 8 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv16f16(<vscale x 16 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv32f16(<vscale x 32 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> %0)
+  ret half %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv2f32(<vscale x 2 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv4f32(<vscale x 4 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv8f32(<vscale x 8 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv16f32(<vscale x 16 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> %0)
+  ret float %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv2f64(<vscale x 2 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv4f64(<vscale x 4 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv8f64(<vscale x 8 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> %0)
+  ret double %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
new file mode 100644
index 0000000000000..ecd5daaab8f9b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-v,+experimental-zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i32)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i32)
+
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i32)
+
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i32)
+
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i32)
+
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i32)
+
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2)
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i32)
+
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i32)
+
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i32)
+
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i32)
+
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i32)
+
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i32)
+
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i32)
+
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i32)
+
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i32)
+
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2)
+  ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
new file mode 100644
index 0000000000000..635218d2689bd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-v,+experimental-zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i64)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i64)
+
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i64)
+
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i64)
+
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i64)
+
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i64)
+
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2)
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i64)
+
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i64)
+
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i64)
+
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i64)
+
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i64)
+
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i64)
+
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i64)
+
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i64)
+
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i64)
+
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2)
+  ret <vscale x 8 x double> %a
+}
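
---

For reference, a minimal sketch of how the two intrinsics compose (not part of the patch; the function name @roundtrip and the choice of the nxv2f32 variant are illustrative only, with signatures taken from the declarations in the tests above). vfmv.s.f writes the scalar into element 0 of the result, with the tied first operand supplying the remaining elements, and vfmv.f.s reads element 0 back, so the value should round-trip for a nonzero vl:

; Illustrative only -- assumes the same rv64 RUN configuration as the tests above.
declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i64)
declare float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float>)

define float @roundtrip(<vscale x 2 x float> %v, float %x, i64 %vl) nounwind {
entry:
  ; Insert %x into element 0 of %v; other elements come from the tied operand %v.
  %ins = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %v, float %x, i64 %vl)
  ; Read element 0 back; %out should equal %x whenever %vl is nonzero.
  %out = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> %ins)
  ret float %out
}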