diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index eab9ee916fd2e2..15a75ba411c043 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -467,27 +467,27 @@ static bool isScalarMoveInstr(const MachineInstr &MI) {
   case RISCV::PseudoVMV_S_X_MF2:
   case RISCV::PseudoVMV_S_X_MF4:
   case RISCV::PseudoVMV_S_X_MF8:
-  case RISCV::PseudoVFMV_F16_S_M1:
-  case RISCV::PseudoVFMV_F16_S_M2:
-  case RISCV::PseudoVFMV_F16_S_M4:
-  case RISCV::PseudoVFMV_F16_S_M8:
-  case RISCV::PseudoVFMV_F16_S_MF2:
-  case RISCV::PseudoVFMV_F16_S_MF4:
-  case RISCV::PseudoVFMV_F16_S_MF8:
-  case RISCV::PseudoVFMV_F32_S_M1:
-  case RISCV::PseudoVFMV_F32_S_M2:
-  case RISCV::PseudoVFMV_F32_S_M4:
-  case RISCV::PseudoVFMV_F32_S_M8:
-  case RISCV::PseudoVFMV_F32_S_MF2:
-  case RISCV::PseudoVFMV_F32_S_MF4:
-  case RISCV::PseudoVFMV_F32_S_MF8:
-  case RISCV::PseudoVFMV_F64_S_M1:
-  case RISCV::PseudoVFMV_F64_S_M2:
-  case RISCV::PseudoVFMV_F64_S_M4:
-  case RISCV::PseudoVFMV_F64_S_M8:
-  case RISCV::PseudoVFMV_F64_S_MF2:
-  case RISCV::PseudoVFMV_F64_S_MF4:
-  case RISCV::PseudoVFMV_F64_S_MF8:
+  case RISCV::PseudoVFMV_S_F16_M1:
+  case RISCV::PseudoVFMV_S_F16_M2:
+  case RISCV::PseudoVFMV_S_F16_M4:
+  case RISCV::PseudoVFMV_S_F16_M8:
+  case RISCV::PseudoVFMV_S_F16_MF2:
+  case RISCV::PseudoVFMV_S_F16_MF4:
+  case RISCV::PseudoVFMV_S_F16_MF8:
+  case RISCV::PseudoVFMV_S_F32_M1:
+  case RISCV::PseudoVFMV_S_F32_M2:
+  case RISCV::PseudoVFMV_S_F32_M4:
+  case RISCV::PseudoVFMV_S_F32_M8:
+  case RISCV::PseudoVFMV_S_F32_MF2:
+  case RISCV::PseudoVFMV_S_F32_MF4:
+  case RISCV::PseudoVFMV_S_F32_MF8:
+  case RISCV::PseudoVFMV_S_F64_M1:
+  case RISCV::PseudoVFMV_S_F64_M2:
+  case RISCV::PseudoVFMV_S_F64_M4:
+  case RISCV::PseudoVFMV_S_F64_M8:
+  case RISCV::PseudoVFMV_S_F64_MF2:
+  case RISCV::PseudoVFMV_S_F64_MF4:
+  case RISCV::PseudoVFMV_S_F64_MF8:
     return true;
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 7b97b72c958709..c8c50ac8dca9ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -190,6 +190,55 @@
 entry:
   ret <vscale x 1 x i64> %y
 }
+define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test10:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 1)
+  ret <vscale x 1 x double> %y
+}
+
+define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test11:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetivli a0, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 2)
+  ret <vscale x 1 x double> %y
+}
+
+define <vscale x 1 x double> @test12(<vscale x 1 x double> %a, double %b, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: test12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetivli zero, 9, e64, m1, tu, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    ret
+entry:
+  %x = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x i1> %mask,
+    i64 9,
+    i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %x, double %b, i64 2)
+  ret <vscale x 1 x double> %y
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -198,10 +247,24 @@ declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   i64,
   i64);
 
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
 declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   i64);
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64
+  (<vscale x 1 x double>,
+  double,
+  i64)
+
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
 declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* nocapture, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)