32 changes: 16 additions & 16 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,

// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
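
The vfwadd hunks above all make the same change: each unmasked IR intrinsic gains a leading passthrough operand, passed as undef for these tail-agnostic C intrinsics. For reference, here is a minimal sketch that exercises the four unmasked vfwadd shapes the tests cover. It assumes an rv64gcv toolchain shipping <riscv_vector.h>; the wrapper name is ours, and the era's non-overloaded spellings are used (newer toolchains prefix them with __riscv_).

#include <stddef.h>
#include <riscv_vector.h>

// Widening FP add: f32 sources, f64 results (vv, vf, wv and wf forms).
vfloat64m1_t widen_add(vfloat32mf2_t a, vfloat32mf2_t b, float s, size_t vl) {
  vfloat64m1_t vv = vfwadd_vv_f64m1(a, b, vl);  // widen(a) + widen(b)
  vfloat64m1_t vf = vfwadd_vf_f64m1(a, s, vl);  // widen(a) + widen(s)
  vfloat64m1_t wv = vfwadd_wv_f64m1(vv, b, vl); // wide vv + widen(b)
  vfloat64m1_t wf = vfwadd_wf_f64m1(vf, s, vl); // wide vf + widen(s)
  return vfadd_vv_f64m1(wv, wf, vl);            // plain f64 add to combine
}
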
16 changes: 8 additions & 8 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,

// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -36,7 +36,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,

// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -55,7 +55,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,

// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -74,7 +74,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,

// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
32 changes: 16 additions & 16 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -17,7 +17,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
@@ -36,7 +36,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
@@ -45,7 +45,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -55,7 +55,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
@@ -64,7 +64,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
@@ -74,7 +74,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
@@ -83,7 +83,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -93,7 +93,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
@@ -102,7 +102,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
@@ -112,7 +112,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
@@ -121,7 +121,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -131,7 +131,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
@@ -150,7 +150,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,

// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c

Large diffs are not rendered by default.

48 changes: 24 additions & 24 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c

Large diffs are not rendered by default.

392 changes: 196 additions & 196 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c

Large diffs are not rendered by default.

120 changes: 60 additions & 60 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c

Large diffs are not rendered by default.

44 changes: 22 additions & 22 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vneg_v_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {
@@ -97,7 +97,7 @@ vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {
@@ -106,7 +106,7 @@ vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {
@@ -115,7 +115,7 @@ vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {
@@ -124,7 +124,7 @@ vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -133,7 +133,7 @@ vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -142,7 +142,7 @@ vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -151,7 +151,7 @@ vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -160,7 +160,7 @@ vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -169,7 +169,7 @@ vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -178,7 +178,7 @@ vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -187,7 +187,7 @@ vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -196,7 +196,7 @@ vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
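
As the CHECK lines above show, vneg has no IR intrinsic of its own: every form lowers to vrsub (reverse subtract) with a scalar zero, i.e. 0 - op1, with the same new leading undef passthrough. A minimal sketch of that equivalence, under the same toolchain assumptions as earlier (the helper name is ours):

#include <stddef.h>
#include <riscv_vector.h>

// vneg(x) and vrsub(x, 0) compute the same value: 0 - x.
vint32m1_t neg_two_ways(vint32m1_t x, size_t vl) {
  vint32m1_t a = vneg_v_i32m1(x, vl);      // emitted as vrsub with scalar 0
  vint32m1_t b = vrsub_vx_i32m1(x, 0, vl); // the lowering, spelled out
  return vxor_vv_i32m1(a, b, vl);          // all zeros iff the two agree
}
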
88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c

Large diffs are not rendered by default.

314 changes: 157 additions & 157 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c

Large diffs are not rendered by default.

16 changes: 8 additions & 8 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -17,7 +17,7 @@ vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -26,7 +26,7 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -35,7 +35,7 @@ vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -44,7 +44,7 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -53,7 +53,7 @@ vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -62,7 +62,7 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -71,7 +71,7 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
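
vsmul is the signed fixed-point fractional multiply, roughly (op1 * op2) >> (SEW - 1) with rounding and saturation, so the EEW=64 forms above need a double-wide intermediate product and get their own test file. A usage sketch under the same toolchain assumptions (later intrinsics revisions also add an explicit rounding-mode argument to these calls):

#include <stddef.h>
#include <riscv_vector.h>

// Q63 fractional multiplies: high half of the doubled 64x64 product,
// rounded according to vxrm and saturated on overflow.
vint64m1_t q63_scale(vint64m1_t a, vint64m1_t b, int64_t k, size_t vl) {
  vint64m1_t vv = vsmul_vv_i64m1(a, b, vl); // vector * vector
  return vsmul_vx_i64m1(vv, k, vl);         // vector * scalar
}
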
72 changes: 36 additions & 36 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c

Large diffs are not rendered by default.

240 changes: 120 additions & 120 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c

Large diffs are not rendered by default.

180 changes: 90 additions & 90 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c

Large diffs are not rendered by default.

240 changes: 120 additions & 120 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c

Large diffs are not rendered by default.

176 changes: 88 additions & 88 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat

// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat16mf4_t test_vfabs_v_f16mf4 (vfloat16mf4_t op1, size_t vl)

// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf2_t test_vfabs_v_f16mf2 (vfloat16mf2_t op1, size_t vl)

// CHECK-RV64-LABEL: @test_vfabs_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16m1_t test_vfabs_v_f16m1 (vfloat16m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m2_t test_vfabs_v_f16m2 (vfloat16m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m4_t test_vfabs_v_f16m4 (vfloat16m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfabs_v_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfabs_v_f16m8 (vfloat16m8_t op1, size_t vl) {
60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
@@ -170,7 +170,7 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat

// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl)

// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl)

// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {

// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
30 changes: 15 additions & 15 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
30 changes: 15 additions & 15 deletions clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -8,7 +8,7 @@

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl)

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl)

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {

// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {