104 changes: 0 additions & 104 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c

Large diffs are not rendered by default.

18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
return vfneg_v_f32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
return vfneg_v_f32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
return vfneg_v_f32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
return vfneg_v_f32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
return vfneg_v_f32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
return vfneg_v_f64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
return vfneg_v_f64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
return vfneg_v_f64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
return vfneg_v_f64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf
return vfneg_v_f32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa
return vfneg_v_f32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa
return vfneg_v_f32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat
return vfneg_v_f32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat
return vfneg_v_f32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa
return vfneg_v_f64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa
return vfneg_v_f64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa
return vfneg_v_f64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
36 changes: 0 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c

Large diffs are not rendered by default.

36 changes: 0 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c

Large diffs are not rendered by default.

36 changes: 0 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c

Large diffs are not rendered by default.

36 changes: 0 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c

Large diffs are not rendered by default.

18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
return vfrdiv_vf_f32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
return vfrdiv_vf_f32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
return vfrdiv_vf_f32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
return vfrdiv_vf_f32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
return vfrdiv_vf_f32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
return vfrdiv_vf_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
return vfrdiv_vf_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
return vfrdiv_vf_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
return vfrdiv_vf_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfrdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
return vfrec7_v_f32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
return vfrec7_v_f32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
return vfrec7_v_f32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
return vfrec7_v_f32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
return vfrec7_v_f32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
return vfrec7_v_f64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
return vfrec7_v_f64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
return vfrec7_v_f64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
return vfrec7_v_f64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfrec7_v_f32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfrec7_v_f32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfrec7_v_f32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfrec7_v_f32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfrec7_v_f32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfrec7_v_f64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfrec7_v_f64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfrec7_v_f64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
return vfredmax_vs_f32m8_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -28,7 +26,6 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -39,7 +36,6 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -50,7 +46,6 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -61,7 +56,6 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -72,7 +66,6 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -83,7 +76,6 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -94,7 +86,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
36 changes: 0 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c

Large diffs are not rendered by default.

18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
return vfrsqrt7_v_f32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
return vfrsqrt7_v_f32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
return vfrsqrt7_v_f32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
return vfrsqrt7_v_f32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
return vfrsqrt7_v_f32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
return vfrsqrt7_v_f64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
return vfrsqrt7_v_f64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
return vfrsqrt7_v_f64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
return vfrsqrt7_v_f64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfrsqrt7_v_f32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfrsqrt7_v_f32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfrsqrt7_v_f32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfrsqrt7_v_f32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfrsqrt7_v_f32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfrsqrt7_v_f64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfrsqrt7_v_f64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfrsqrt7_v_f64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
return vfrsub_vf_f32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
return vfrsub_vf_f32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
return vfrsub_vf_f32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
return vfrsub_vf_f32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
return vfrsub_vf_f32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
return vfrsub_vf_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
return vfrsub_vf_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
return vfrsub_vf_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
return vfrsub_vf_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
108 changes: 0 additions & 108 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c

Large diffs are not rendered by default.

18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
return vfslide1down_vf_f32mf2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
return vfslide1down_vf_f32m1(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
return vfslide1down_vf_f32m2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
return vfslide1down_vf_f32m4(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
return vfslide1down_vf_f32m8(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
return vfslide1down_vf_f64m1(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
return vfslide1down_vf_f64m2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
return vfslide1down_vf_f64m4(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
return vfslide1down_vf_f64m8(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask,
return vfslide1down_vf_f32mf2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -130,7 +119,6 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask,
return vfslide1down_vf_f32m1_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -143,7 +131,6 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask,
return vfslide1down_vf_f32m2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -155,7 +142,6 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfslide1down_vf_f32m4_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -167,7 +153,6 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfslide1down_vf_f32m8_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -180,7 +165,6 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask,
return vfslide1down_vf_f64m1_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -193,7 +177,6 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask,
return vfslide1down_vf_f64m2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +189,6 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask,
return vfslide1down_vf_f64m4_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
return vfslide1up_vf_f32mf2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
return vfslide1up_vf_f32m1(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
return vfslide1up_vf_f32m2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
return vfslide1up_vf_f32m4(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
return vfslide1up_vf_f32m8(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
return vfslide1up_vf_f64m1(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
return vfslide1up_vf_f64m2(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
return vfslide1up_vf_f64m4(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
@@ -104,7 +95,6 @@ vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
return vfslide1up_vf_f64m8(src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +107,6 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -129,7 +118,6 @@ vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -141,7 +129,6 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -153,7 +140,6 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +151,6 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -177,7 +162,6 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -189,7 +173,6 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -201,7 +184,6 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl);
}

//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
18 changes: 0 additions & 18 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
return vfsqrt_v_f32mf2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
return vfsqrt_v_f32m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
return vfsqrt_v_f32m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
return vfsqrt_v_f32m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
return vfsqrt_v_f32m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
return vfsqrt_v_f64m1(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
return vfsqrt_v_f64m2(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
return vfsqrt_v_f64m4(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
return vfsqrt_v_f64m8(op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -106,7 +96,6 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -117,7 +106,6 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +116,6 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +126,6 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -150,7 +136,6 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -161,7 +146,6 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -172,7 +156,6 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -183,7 +166,6 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
36 changes: 0 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
return vfsub_vv_f32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
return vfsub_vf_f32mf2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
return vfsub_vv_f32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
return vfsub_vf_f32m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
return vfsub_vv_f32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
return vfsub_vf_f32m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
return vfsub_vv_f32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
return vfsub_vf_f32m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
return vfsub_vv_f32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
return vfsub_vf_f32m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
return vfsub_vv_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
return vfsub_vf_f64m1(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
return vfsub_vv_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
return vfsub_vf_f64m2(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
return vfsub_vv_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,7 +157,6 @@ vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
return vfsub_vf_f64m4(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -184,7 +167,6 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
return vfsub_vv_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
@@ -194,7 +176,6 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
return vfsub_vf_f64m8(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -206,7 +187,6 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -217,7 +197,6 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
return vfsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -229,7 +208,6 @@ vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -240,7 +218,6 @@ vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
return vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -252,7 +229,6 @@ vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -263,7 +239,6 @@ vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
return vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -275,7 +250,6 @@ vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -286,7 +260,6 @@ vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
return vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -298,7 +271,6 @@ vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -309,7 +281,6 @@ vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
return vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -321,7 +292,6 @@ vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -332,7 +302,6 @@ vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
return vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -344,7 +313,6 @@ vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -355,7 +323,6 @@ vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
return vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -367,7 +334,6 @@ vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -378,7 +344,6 @@ vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
return vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -390,7 +355,6 @@ vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
return vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])