clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c: 16 changes (0 additions & 16 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
return vfwadd_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
return vfwadd_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
return vfwadd_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
return vfwadd_wf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
return vfwadd_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
return vfwadd_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
return vfwadd_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
return vfwadd_wf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
return vfwadd_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
return vfwadd_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
return vfwadd_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
return vfwadd_wf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
return vfwadd_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
return vfwadd_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
return vfwadd_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c: 38 changes (0 additions & 38 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -25,7 +23,6 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -35,7 +32,6 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -45,7 +41,6 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -55,7 +50,6 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -65,7 +59,6 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -75,7 +68,6 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -85,7 +77,6 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -95,7 +86,6 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -105,7 +95,6 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -115,7 +104,6 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
return vfwcvt_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -125,7 +113,6 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
return vfwcvt_rtz_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -135,7 +122,6 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
return vfwcvt_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -145,7 +131,6 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
return vfwcvt_rtz_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -155,7 +140,6 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
return vfwcvt_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -165,7 +149,6 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
return vfwcvt_rtz_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -175,7 +158,6 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
return vfwcvt_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -185,7 +167,6 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
return vfwcvt_rtz_x(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -195,7 +176,6 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
return vfwcvt_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -205,7 +185,6 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
return vfwcvt_rtz_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -215,7 +194,6 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
return vfwcvt_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -225,7 +203,6 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
return vfwcvt_rtz_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -235,7 +212,6 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
return vfwcvt_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -245,7 +221,6 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
return vfwcvt_rtz_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -255,7 +230,6 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
return vfwcvt_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -265,7 +239,6 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
return vfwcvt_rtz_xu(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -275,7 +248,6 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -285,7 +257,6 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -295,7 +266,6 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -305,7 +275,6 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -315,7 +284,6 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -325,7 +293,6 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -335,7 +302,6 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -345,7 +311,6 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -355,7 +320,6 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -365,7 +329,6 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
@@ -375,7 +338,6 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
return vfwcvt_f(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c: 16 changes (0 additions & 16 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
return vfwmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
return vfwmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c: 16 changes (0 additions & 16 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
return vfwmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
return vfwmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c: 8 changes (0 additions & 8 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
return vfwmul(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c: 16 changes (0 additions & 16 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
return vfwnmacc(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
return vfwnmacc(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c: 16 changes (0 additions & 16 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -27,7 +25,6 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -38,7 +35,6 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -49,7 +45,6 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -60,7 +55,6 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -71,7 +65,6 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -82,7 +75,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -93,7 +85,6 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
return vfwnmsac(acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -105,7 +96,6 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -116,7 +106,6 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -128,7 +117,6 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -139,7 +127,6 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -151,7 +138,6 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -162,7 +148,6 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -174,7 +159,6 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
return vfwnmsac(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c: 20 changes (0 additions & 20 deletions)
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -17,7 +16,6 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
return vfwredsum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -29,7 +27,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
return vfwredsum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -41,7 +38,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
return vfwredsum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -53,7 +49,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
return vfwredsum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -65,7 +60,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
return vfwredsum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -77,7 +71,6 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
return vfwredsum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -89,7 +82,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
return vfwredsum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -101,7 +93,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
return vfwredsum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -113,7 +104,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
return vfwredsum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +115,6 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
return vfwredsum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -137,7 +126,6 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
return vfwredosum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -149,7 +137,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
return vfwredosum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -161,7 +148,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
return vfwredosum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -173,7 +159,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
return vfwredosum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
@@ -185,7 +170,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
return vfwredosum(dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -197,7 +181,6 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
return vfwredosum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -209,7 +192,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
return vfwredosum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -221,7 +203,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
return vfwredosum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -233,7 +214,6 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
return vfwredosum(mask, dst, vector, scalar, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
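An aside on the intrinsics exercised above: vfwredsum is the unordered widening reduction-sum (f32 source elements folded into an f64 result kept in element 0 of an f64m1 register), and vfwredosum is the ordered variant with sequential, reproducible rounding. A minimal usage sketch under the same pre-1.0 overloaded API these tests confirm; the helper name reduce_chunk is hypothetical, and a vector-enabled target (e.g. -march=rv64gcv) is assumed:

#include <riscv_vector.h>

// Hypothetical helper: fold one chunk of f32 data into a running f64 sum.
// `scalar` carries the initial accumulator in element 0; element 0 of the
// result holds the updated sum. The call shape matches the tests above.
vfloat64m1_t reduce_chunk(vfloat64m1_t dst, vfloat32mf2_t vector,
                          vfloat64m1_t scalar, size_t vl) {
  return vfwredsum(dst, vector, scalar, vl);
  // Ordered alternative with the same signature:
  //   return vfwredosum(dst, vector, scalar, vl);
}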
16 changes: 0 additions & 16 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -5,7 +5,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -16,7 +15,6 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
return vfwsub_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
return vfwsub_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
return vfwsub_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
return vfwsub_wf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -58,7 +53,6 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
return vfwsub_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -68,7 +62,6 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
return vfwsub_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -79,7 +72,6 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
return vfwsub_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -89,7 +81,6 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
return vfwsub_wf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -100,7 +91,6 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
return vfwsub_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
return vfwsub_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
return vfwsub_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -131,7 +119,6 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
return vfwsub_wf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -142,7 +129,6 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
return vfwsub_vv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -152,7 +138,6 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
return vfwsub_vf(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -163,7 +148,6 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
return vfwsub_wv(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
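An aside on vfwsub's four forms, mirrored by the tests above: the _vv and _vf forms widen both f32 operands to f64 before subtracting, while _wv and _wf take an already-wide f64 first operand and widen only the narrow second operand. A short sketch reusing the overloaded calls confirmed by the tests; the helper names are hypothetical:

#include <riscv_vector.h>

// f32 - f32 with widening: both operands are promoted to f64 first.
vfloat64m1_t wide_sub(vfloat32mf2_t a, vfloat32mf2_t b, size_t vl) {
  return vfwsub_vv(a, b, vl);
}

// f64 - f32 scalar: only the scalar operand is promoted.
vfloat64m1_t wide_sub_scalar(vfloat64m1_t a, float b, size_t vl) {
  return vfwsub_wf(a, b, vl);
}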
22 changes: 0 additions & 22 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -4,7 +4,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -47,7 +43,6 @@ vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -57,7 +52,6 @@ vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -67,7 +61,6 @@ vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -77,7 +70,6 @@ vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -88,7 +80,6 @@ vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -99,7 +90,6 @@ vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -110,7 +100,6 @@ vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -121,7 +110,6 @@ vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -132,7 +120,6 @@ vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -143,7 +130,6 @@ vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -154,7 +140,6 @@ vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -165,7 +150,6 @@ vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -176,7 +160,6 @@ vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -187,7 +170,6 @@ vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -198,7 +180,6 @@ vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -209,7 +190,6 @@ vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -220,7 +200,6 @@ vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -231,7 +210,6 @@ vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
return vid(mask, maskedoff, vl);
}

//
// CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
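An aside on vid: it writes each lane's index (0, 1, 2, ...) into the destination; under a mask, inactive lanes keep the maskedoff values. Presumably only the masked form is testable here because an overloaded unmasked vid(vl) would have no argument from which to deduce a return type. A sketch with the call shape confirmed by the tests; the helper name is hypothetical:

#include <riscv_vector.h>

// Hypothetical helper: lane indices in active lanes, maskedoff elsewhere.
vuint32m1_t lane_indices(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) {
  return vid(mask, maskedoff, vl);
}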
22 changes: 0 additions & 22 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -4,7 +4,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -15,7 +14,6 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -26,7 +24,6 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -37,7 +34,6 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -48,7 +44,6 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -59,7 +54,6 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -70,7 +64,6 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -81,7 +74,6 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -92,7 +84,6 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -103,7 +94,6 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -114,7 +104,6 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -125,7 +114,6 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -136,7 +124,6 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -147,7 +134,6 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -158,7 +144,6 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -169,7 +154,6 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -180,7 +164,6 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -191,7 +174,6 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -202,7 +184,6 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -213,7 +194,6 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -224,7 +204,6 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -235,7 +214,6 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
return viota(mask, maskedoff, op1, vl);
}

//
// CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
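An aside on viota: lane i of the result counts how many of lanes 0..i-1 have their bit set in the source mask, which makes it the usual building block for computing compaction or scatter offsets. A sketch with the masked call shape confirmed by the tests; the helper name is hypothetical:

#include <riscv_vector.h>

// Hypothetical helper: per-lane prefix popcount of `bits`, under `mask`.
vuint32m1_t compaction_offsets(vbool32_t mask, vuint32m1_t maskedoff,
                               vbool32_t bits, size_t vl) {
  return viota(mask, maskedoff, bits, vl);
}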
191 changes: 0 additions & 191 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c

Large diffs are not rendered by default.

191 changes: 0 additions & 191 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c

Large diffs are not rendered by default.

176 changes: 0 additions & 176 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c

Large diffs are not rendered by default.

176 changes: 0 additions & 176 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c

Large diffs are not rendered by default.

176 changes: 0 additions & 176 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c

Large diffs are not rendered by default.

14 changes: 0 additions & 14 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
@@ -4,7 +4,6 @@

#include <riscv_vector.h>

//
// CHECK-RV64-LABEL: @test_vmand_mm_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -14,7 +13,6 @@ vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmand_mm_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -24,7 +22,6 @@ vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmand_mm_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -34,7 +31,6 @@ vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmand_mm_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -44,7 +40,6 @@ vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmand_mm_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -54,7 +49,6 @@ vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmand_mm_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -64,7 +58,6 @@ vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmand_mm_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -74,7 +67,6 @@ vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
return vmand(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -84,7 +76,6 @@ vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -94,7 +85,6 @@ vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -104,7 +94,6 @@ vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -114,7 +103,6 @@ vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -124,7 +112,6 @@ vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -134,7 +121,6 @@ vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
return vmandnot(op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
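An aside on the mask-register logicals above: vmand and vmandnot combine whole mask registers up to vl. A sketch reusing the calls confirmed by the tests; the helper names are hypothetical, and the operand order for vmandnot (op1 & ~op2, following the vmandn.mm vd, vs2, vs1 encoding) is an assumption:

#include <riscv_vector.h>

// a & b over the first vl mask bits.
vbool8_t mask_and(vbool8_t a, vbool8_t b, size_t vl) {
  return vmand(a, b, vl);
}

// active & ~done: drop already-processed lanes from an active-lane mask.
// Assumes vmandnot negates its second operand.
vbool8_t mask_remaining(vbool8_t active, vbool8_t done, size_t vl) {
  return vmandnot(active, done, vl);
}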