467 changes: 378 additions & 89 deletions llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll

Large diffs are not rendered by default.

419 changes: 330 additions & 89 deletions llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll

Large diffs are not rendered by default.

419 changes: 330 additions & 89 deletions llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll

Large diffs are not rendered by default.

419 changes: 330 additions & 89 deletions llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll

Large diffs are not rendered by default.

419 changes: 330 additions & 89 deletions llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll

Large diffs are not rendered by default.

424 changes: 356 additions & 68 deletions llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll

Large diffs are not rendered by default.

972 changes: 751 additions & 221 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll

Large diffs are not rendered by default.

972 changes: 751 additions & 221 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll

Large diffs are not rendered by default.

972 changes: 751 additions & 221 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll

Large diffs are not rendered by default.

220 changes: 219 additions & 1 deletion llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
<vscale x 1 x half>,
@@ -510,3 +510,221 @@ entry:

ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
<vscale x 1 x double>,
double,
i32);

define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
; CHECK-NEXT: vfslide1down.vf v16, v16, ft0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
<vscale x 1 x double> %0,
double %1,
i32 %2)

ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
i32);

define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
i32 %4)

ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
<vscale x 2 x double>,
double,
i32);

define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
; CHECK-NEXT: vfslide1down.vf v16, v16, ft0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
<vscale x 2 x double> %0,
double %1,
i32 %2)

ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
i32);

define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
i32 %4)

ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
<vscale x 4 x double>,
double,
i32);

define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
; CHECK-NEXT: vfslide1down.vf v16, v16, ft0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
<vscale x 4 x double> %0,
double %1,
i32 %2)

ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
i32);

define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
i32 %4)

ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
<vscale x 8 x double>,
double,
i32);

define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT: vfslide1down.vf v16, v16, ft0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
<vscale x 8 x double> %0,
double %1,
i32 %2)

ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
double,
<vscale x 8 x i1>,
i32);

define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 8(sp)
; CHECK-NEXT: sw a2, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a3, e64,m8,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
double %2,
<vscale x 8 x i1> %3,
i32 %4)

ret <vscale x 8 x double> %a
}
224 changes: 223 additions & 1 deletion llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
<vscale x 1 x half>,
@@ -521,3 +521,225 @@ entry:

ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
<vscale x 1 x double>,
double,
i32);

define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
; CHECK-NEXT: vfslide1up.vf v25, v16, ft0
; CHECK-NEXT: vmv1r.v v16, v25
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
<vscale x 1 x double> %0,
double %1,
i32 %2)

ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
i32);

define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
i32 %4)

ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
<vscale x 2 x double>,
double,
i32);

define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
; CHECK-NEXT: vfslide1up.vf v26, v16, ft0
; CHECK-NEXT: vmv2r.v v16, v26
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
<vscale x 2 x double> %0,
double %1,
i32 %2)

ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
i32);

define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
i32 %4)

ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
<vscale x 4 x double>,
double,
i32);

define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
; CHECK-NEXT: vfslide1up.vf v28, v16, ft0
; CHECK-NEXT: vmv4r.v v16, v28
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
<vscale x 4 x double> %0,
double %1,
i32 %2)

ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
i32);

define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
i32 %4)

ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
<vscale x 8 x double>,
double,
i32);

define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT: vfslide1up.vf v8, v16, ft0
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
<vscale x 8 x double> %0,
double %1,
i32 %2)

ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
double,
<vscale x 8 x i1>,
i32);

define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 8(sp)
; CHECK-NEXT: sw a2, 12(sp)
; CHECK-NEXT: fld ft0, 8(sp)
; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a3, e64,m8,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
double %2,
<vscale x 8 x i1> %3,
i32 %4)

ret <vscale x 8 x double> %a
}
312 changes: 168 additions & 144 deletions llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll

Large diffs are not rendered by default.

797 changes: 663 additions & 134 deletions llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll

Large diffs are not rendered by default.

388 changes: 387 additions & 1 deletion llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll

Large diffs are not rendered by default.

388 changes: 387 additions & 1 deletion llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll

Large diffs are not rendered by default.

519 changes: 397 additions & 122 deletions llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll

Large diffs are not rendered by default.

388 changes: 387 additions & 1 deletion llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll

Large diffs are not rendered by default.

1,008 changes: 719 additions & 289 deletions llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll

Large diffs are not rendered by default.

1,008 changes: 719 additions & 289 deletions llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll

Large diffs are not rendered by default.