diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 48e88befa0f01..16f34a89a52ec 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -722,6 +722,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                            MachineInstr &MI) const {
   Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+
+  if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID))
+    return true;
+
   switch (IntrinsicID) {
   default:
     return false;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ae9e2fef88673..7a0718e6b5e3e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -38,6 +38,7 @@
 #include "llvm/IR/DiagnosticPrinter.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/MC/MCCodeEmitter.h"
 #include "llvm/MC/MCInstBuilder.h"
@@ -24894,6 +24895,12 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
       Op == Instruction::Freeze || Op == Instruction::Store)
     return false;
 
+  if (auto *II = dyn_cast<IntrinsicInst>(&Inst)) {
+    // Mark RVV intrinsic as supported.
+    if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(II->getIntrinsicID()))
+      return false;
+  }
+
   if (Inst.getType()->isScalableTy())
     return true;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
index 49276c9416234..8e43e044b7ee5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
@@ -2,39 +2,6 @@
 ; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
 ; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
 
-
-declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64)
-
-; FALLBACK_WITH_REPORT_ERR: <unknown>:0:0: unable to translate instruction: call
-; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
-define <vscale x 1 x i8> @scalable_arg(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call
-; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_inst
-define <vscale x 1 x i8> @scalable_inst(i64 %0) nounwind {
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> undef,
-    i64 %0)
-
-  ret <vscale x 1 x i8> %a
-}
-
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: alloca:
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_alloca
 define void @scalable_alloca() #1 {
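Taken together, the two hooks flip RVV intrinsics from "fall back to SelectionDAG" to "handled by GlobalISel": legalizeIntrinsic now reports any intrinsic that has an entry in RISCVVIntrinsicsTable as legal, and fallBackToDAGISel no longer bails out on calls to such intrinsics. That is also why the two negative tests are dropped from fallback.ll above: they asserted the old behavior, an "unable to translate instruction" remark on exactly this kind of call. As a minimal sketch of the IR shape involved (function and value names here are illustrative; the intrinsic and operand layout come from the deleted tests), the following now compiles under -global-isel instead of taking the fallback path:

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64)

define <vscale x 1 x i8> @vadd_example(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i64 %vl) {
  ; Operands: passthru (undef here), the two sources, and the AVL.
  %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
      <vscale x 1 x i8> undef, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i64 %vl)
  ret <vscale x 1 x i8> %r
}

The new vfadd.ll test below exercises the same path for a rounding-mode-carrying floating-point intrinsic.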
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
new file mode 100644
index 0000000000000..8a9cc150907dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
@@ -0,0 +1,764 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d -global-isel | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d -global-isel | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfhmin,+zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d -global-isel | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d -global-isel | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+    <vscale x 1 x half> poison,
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x half> %a
+}
+
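Since the same pattern repeats below for every element type and LMUL, a note on the operand layout (the operand names here are illustrative, not from the test): the unmasked form takes a passthru, two sources, a rounding-mode immediate, and the AVL; the masked form additionally takes a mask and a trailing policy immediate. Rounding mode 0 selects rne, which is why every check block brackets vfadd.vv with an fsrmi/fsrm save-and-restore of frm, and policy 1 (tail agnostic) matches the "ta, mu" in the vsetvli:

  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %maskedoff,  ; merge value for masked-off lanes
    <vscale x 1 x half> %op1,
    <vscale x 1 x half> %op2,
    <vscale x 1 x i1> %mask,         ; selected into v0
    iXLen 0,                         ; frm immediate: 0 = rne
    iXLen %avl,                      ; active vector length
    iXLen 1)                         ; policy: 1 = tail agnostic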
+declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
+    <vscale x 2 x half> poison,
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
+    <vscale x 4 x half> poison,
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
+    <vscale x 8 x half> poison,
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
+    <vscale x 16 x half> poison,
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  iXLen, iXLen);
+
+define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
+    <vscale x 32 x half> poison,
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  <vscale x 32 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 32 x half> %2,
+    <vscale x 32 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x half> %a
+}
+
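The nxv32f16 masked case above is the first where LMUL=8 changes the shape of the output: three full 8-register vector operands plus a mask are more than the vector argument registers and v0 can carry, so the third vector operand is passed indirectly and reloaded with vl8re16.v, and the AVL consequently arrives in a1 rather than a0. The nxv16f32 and nxv8f64 masked cases below follow the same pattern with vl8re32.v and vl8re64.v.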
+declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
+    <vscale x 1 x float> poison,
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
+    <vscale x 2 x float> poison,
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
+    <vscale x 4 x float> poison,
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
+    <vscale x 8 x float> poison,
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
+    <vscale x 16 x float> poison,
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x float> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> poison,
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+    <vscale x 2 x double> poison,
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+    <vscale x 4 x double> poison,
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+    <vscale x 8 x double> poison,
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x double> %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x double> %a
+}
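As far as coverage goes, the new file appears to mirror the long-standing SelectionDAG test llvm/test/CodeGen/RISCV/rvv/vfadd.ll: vector-vector vfadd across f16/f32/f64 element types and LMULs from mf4 through m8, in both unmasked and masked forms, with riscv32 and riscv64 exercised via the iXLen substitution in the RUN lines.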