52 changes: 52 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
@@ -482,4 +482,56 @@ vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64
return vcompress(mask, dest, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
return vcompress_tu(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
return vcompress_tu(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) {
return vcompress_tu(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
return vcompress_ta(mask, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
return vcompress_ta(mask, src, vl);
}

// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
return vcompress_ta(mask, src, vl);
}
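
For context, a minimal sketch of how the new policy overloads are meant to be called (not part of this patch; it assumes <riscv_vector.h> and a caller-chosen vl). With _tu the dest operand supplies the tail elements, which is why the IR above passes [[MERGE]] as the first intrinsic operand; with _ta there is no dest and that operand is undef.

#include <riscv_vector.h>

// Tail-undisturbed: lanes past the packed result keep dest's old values.
vint32mf2_t pack_keep_tail(vbool64_t mask, vint32mf2_t dest,
                           vint32mf2_t src, size_t vl) {
  return vcompress_tu(mask, dest, src, vl);
}

// Tail-agnostic: no dest operand; tail lanes are unspecified after the call.
vint32mf2_t pack_any_tail(vbool64_t mask, vint32mf2_t src, size_t vl) {
  return vcompress_ta(mask, src, vl);
}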
53 changes: 53 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -167,3 +167,56 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
return vfabs(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfabs_tu(merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
return vfabs_ta(op1, vl);
}

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfabs_tuma(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfabs_tumu(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
return vfabs_tama(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfabs_tamu(mask, merge, op1, vl);
}
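
A note on the suffix scheme these tests pin down (read off the CHECK lines above, so treat it as an observation rather than spec text): vfabs lowers to vfsgnjx with op1 passed twice, and the masked forms carry a trailing policy immediate of tumu=0, tamu=1, tuma=2, tama=3. A minimal sketch, assuming <riscv_vector.h>:

#include <riscv_vector.h>

// |x| where inactive lanes keep merge's values (mask-undisturbed) and tail
// lanes are left unspecified (tail-agnostic); lowers with policy immediate 1.
vfloat32mf2_t abs_masked(vbool64_t mask, vfloat32mf2_t merge,
                         vfloat32mf2_t x, size_t vl) {
  return vfabs_tamu(mask, merge, x, vl);
}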
18 changes: 18 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -94,3 +94,21 @@ vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
size_t vl) {
return vfmerge(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vfmerge_tu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vfmerge_ta(mask, op1, op2, vl);
}
53 changes: 53 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
@@ -167,3 +167,56 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
return vfneg(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfneg_tu(merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
return vfneg_ta(op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfneg_tuma(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfneg_tumu(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
return vfneg_tama(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfneg_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
return vfneg_tamu(mask, merge, op1, vl);
}
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -194,3 +194,39 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmax_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmax_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmax_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmax_tam(mask, vector, scalar, vl);
}
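
Reductions only come in the four flavours tested here (_tu/_ta plus masked _tum/_tam) because they write a single element: element 0 of the m1 destination receives the maximum of the source elements and scalar[0], and the tail policy decides what happens to the remaining destination lanes. A hedged usage sketch, not from the patch:

#include <riscv_vector.h>

// Fold one chunk into a running maximum carried in acc[0]; with _tu the other
// lanes of the m1 result come from merge instead of being left unspecified.
vfloat32m1_t running_max(vfloat32m1_t merge, vfloat32mf2_t chunk,
                         vfloat32m1_t acc, size_t vl) {
  return vfredmax_tu(merge, chunk, acc, vl);
}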
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -194,3 +194,39 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmin_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmin_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmin_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredmin_tam(mask, vector, scalar, vl);
}
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -392,3 +392,75 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredusum_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredusum_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredusum_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredusum_tam(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredosum_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredosum_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredosum_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
return vfredosum_tam(mask, vector, scalar, vl);
}
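
The usum/osum pair differs only in whether the additions may be reassociated (unordered) or must proceed in element order (ordered); the policy plumbing is identical. Below, a strip-mined sketch of the unordered form — not from this patch, and it assumes the standard vsetvl_e32mf2 and vle32_v_f32mf2 intrinsics from <riscv_vector.h> plus a caller-prepared accumulator:

#include <riscv_vector.h>

// acc0 carries the initial total in element 0 (e.g. a zero splat).
vfloat32m1_t sum_buffer(const float *p, size_t n, vfloat32m1_t acc0) {
  vfloat32m1_t acc = acc0;
  while (n > 0) {
    size_t vl = vsetvl_e32mf2(n);                // lanes in this strip
    vfloat32mf2_t chunk = vle32_v_f32mf2(p, vl); // load one strip
    acc = vfredusum_tu(acc, chunk, acc, vl);     // acc[0] += sum(chunk)
    p += vl;
    n -= vl;
  }
  return acc; // total lives in element 0
}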
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -224,3 +224,75 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu (vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredusum_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredusum_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum (vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredusum_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredusum_tam(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredosum_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredosum_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredosum_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
return vfwredosum_tam(mask, vector, scalar, vl);
}
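
The widening forms accumulate f32 sources into an f64 m1 destination, so the merge and scalar operands are already double-width; the policy behaviour otherwise mirrors vfredsum. A one-line sketch, not from the patch:

#include <riscv_vector.h>

// Fold an f32 chunk into an f64 accumulator; acc[0] carries the running total.
vfloat64m1_t widen_sum(vfloat64m1_t acc, vfloat32mf2_t chunk, size_t vl) {
  return vfwredusum_tu(acc, chunk, acc, vl);
}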
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -219,3 +219,39 @@ vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
size_t vl) {
return vid(mask, maskedoff, vl);
}

// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t merge, size_t vl) {
return vid_tu(merge, vl);
}

// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
return vid_tuma(mask, merge, vl);
}

// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
return vid_tumu(mask, merge, vl);
}

// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
return vid_tamu(mask, merge, vl);
}
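
vid materialises the lane indices 0, 1, ..., vl-1 and takes no source operand, so the tests above cover exactly the overloads that carry a merge argument. A sketch, not from this patch:

#include <riscv_vector.h>

// Lane indices for the active part; tail lanes are preserved from merge (_tu).
vuint32mf2_t lane_ids(vuint32mf2_t merge, size_t vl) {
  return vid_tu(merge, vl);
}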
80 changes: 58 additions & 22 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c

Large diffs are not rendered by default.

90 changes: 90 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -974,3 +974,93 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
vfloat64m8_t op2, size_t vl) {
return vmerge(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmerge_tu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmerge_tu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmerge_tu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmerge_tu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmerge_ta(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmerge_ta(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmerge_ta(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmerge_ta(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmerge_tu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmerge_ta(mask, op1, op2, vl);
}
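
vmerge is itself a per-lane select, so the mask is a data operand rather than a control mask and only the tail policy varies (_tu/_ta). A blend sketch, not from this patch:

#include <riscv_vector.h>

// out[i] = mask[i] ? op2[i] : op1[i] for i < vl; tail lanes taken from merge.
vint32mf2_t blend(vbool64_t mask, vint32mf2_t merge,
                  vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vmerge_tu(mask, merge, op1, op2, vl);
}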
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
@@ -361,3 +361,39 @@ vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmfeq(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfeq_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfeq_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfeq_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfeq_mu(mask, merge, op1, op2, vl);
}
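
Compares produce a mask, so the destination is a mask register and only the mask policy applies: _ma passes undef as the merge operand (result bits for inactive lanes are unspecified) while _mu keeps them from merge, exactly as the undef/[[MERGE]] first operands above show. A sketch, not from this patch:

#include <riscv_vector.h>

// Equality test only where mask is set; inactive result bits are unspecified.
vbool64_t eq_where_active(vbool64_t mask, vfloat32mf2_t a,
                          vfloat32mf2_t b, size_t vl) {
  return vmfeq_ma(mask, a, b, vl);
}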
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
@@ -329,3 +329,39 @@ vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
return vmfge(mask, maskedoff, op1, op2, vl);
}


// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfge_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfge_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfge_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfge_mu(mask, merge, op1, op2, vl);
}
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -329,3 +329,39 @@ vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
return vmfgt(mask, maskedoff, op1, op2, vl);
}


// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfgt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfgt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfgt_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfgt_mu(mask, merge, op1, op2, vl);
}
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
@@ -361,3 +361,39 @@ vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmfle(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfle_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfle_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfle_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfle_mu(mask, merge, op1, op2, vl);
}
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
@@ -361,3 +361,39 @@ vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmflt(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmflt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmflt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmflt_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmflt_mu(mask, merge, op1, op2, vl);
}
36 changes: 36 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
@@ -361,3 +361,39 @@ vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmfne(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfne_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfne_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfne_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfne_mu(mask, merge, op1, op2, vl);
}
18 changes: 18 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -122,3 +122,21 @@ vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
size_t vl) {
return vmsbf(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
return vmsbf_ma(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
return vmsbf_mu(mask, merge, op1, vl);
}
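
vmsbf computes set-before-first: the result has 1-bits for the lanes before the first set bit of op1, which makes it the usual building block for everything-before-the-first-match predicates. A sketch, not from this patch:

#include <riscv_vector.h>

// Active lanes strictly before the first 1-bit of op1; inactive bits agnostic.
vbool4_t before_first(vbool4_t mask, vbool4_t op1, size_t vl) {
  return vmsbf_ma(mask, op1, vl);
}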
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
@@ -1699,3 +1699,75 @@ vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmseq(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
@@ -1588,3 +1588,75 @@ vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
return vmsgeu(mask, maskedoff, op1, op2, vl);
}


// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsge_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsge_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgeu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgeu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsge_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsge_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgeu_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgeu_mu(mask, merge, op1, op2, vl);
}
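
// Editorial sketch (illustrative, not generated): with _ma the inactive bits
// of the result are unspecified, so callers must consume only the bits
// selected by the mask. The name and usage below are assumptions.
vbool64_t example_active_ge_only(vbool64_t mask, vint32mf2_t a, vint32mf2_t b,
                                 size_t vl) {
  // Only mask-active bits of the returned vbool64_t are defined.
  return vmsge_ma(mask, a, b, vl);
}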
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
@@ -1588,3 +1588,75 @@ vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
return vmsgtu(mask, maskedoff, op1, op2, vl);
}


// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsgt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsgt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgtu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgtu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsgt_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsgt_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgtu_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgtu_mu(mask, merge, op1, op2, vl);
}
18 changes: 18 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -122,3 +122,21 @@ vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
size_t vl) {
return vmsif(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
return vmsif_ma(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vmsif_m_b4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
return vmsif_mu(mask, merge, op1, vl);
}
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -1714,3 +1714,75 @@ vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsleu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsle_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsle_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsleu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsleu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsle_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsle_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsleu_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsleu_mu(mask, merge, op1, op2, vl);
}
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
@@ -1714,3 +1714,75 @@ vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsltu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmslt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmslt_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsltu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsltu_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmslt_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmslt_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsltu_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsltu_mu(mask, merge, op1, op2, vl);
}
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
@@ -1699,3 +1699,75 @@ vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsne(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}
18 changes: 18 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -122,3 +122,21 @@ vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
size_t vl) {
return vmsof(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
return vmsof_ma(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vmsof_m_b4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
return vmsof_mu(mask, merge, op1, vl);
}
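
// Editorial sketch (illustrative, not generated): vmsif/vmsof are mask-only
// ops (set-including-first / set-only-first per the V spec), so their policy
// variants take vbool operands throughout; b4 maps to <vscale x 16 x i1> in
// the CHECK lines above. The function name is an assumption.
vbool4_t example_first_active_bit(vbool4_t mask, vbool4_t src, size_t vl) {
  // Isolate the first active 1 of src; inactive result bits are unspecified.
  return vmsof_ma(mask, src, vl);
}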
107 changes: 107 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -544,3 +544,110 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuin
return vncvt_x(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t merge, vint32mf2_t src, size_t vl) {
return vncvt_x_tu(merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
return vncvt_x_tu(merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
return vncvt_x_ta(src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) {
return vncvt_x_ta(src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
return vncvt_x_tuma(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
return vncvt_x_tuma(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
return vncvt_x_tumu(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
return vncvt_x_tumu(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
return vncvt_x_tama(mask, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
return vncvt_x_tama(mask, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vncvt_x_x_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
return vncvt_x_tamu(mask, merge, src, vl);
}

// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vncvt_x_x_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
return vncvt_x_tamu(mask, merge, src, vl);
}
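
// Editorial note: the trailing i64 immediate on the masked calls above is the
// combined policy operand. Reading it off the CHECK lines: tumu = 0, tamu = 1,
// tuma = 2, tama = 3, i.e. bit 0 set means tail agnostic and bit 1 set means
// mask agnostic. A sketch of that encoding (names are illustrative, not
// LLVM's own):
enum example_rvv_policy {
  EXAMPLE_TUMU = 0, // tail undisturbed, mask undisturbed
  EXAMPLE_TAMU = 1, // tail agnostic,    mask undisturbed
  EXAMPLE_TUMA = 2, // tail undisturbed, mask agnostic
  EXAMPLE_TAMA = 3  // tail agnostic,    mask agnostic
};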
53 changes: 53 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
@@ -401,3 +401,56 @@ vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
return vneg(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vneg_tu(merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
return vneg_ta(op1, vl);
}

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vneg_tuma(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vneg_tumu(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
return vneg_tama(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vneg_tamu(mask, merge, op1, vl);
}
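
// Editorial sketch: as the CHECK lines show, vneg is sugar for vrsub with a
// zero immediate (0 - x). Illustrative caller (name is an assumption):
vint32mf2_t example_negate_keep_tail(vint32mf2_t merge, vint32mf2_t x,
                                     size_t vl) {
  // Lowers to llvm.riscv.vrsub with i32 0; tail elements come from merge.
  return vneg_tu(merge, x, vl);
}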
108 changes: 108 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -796,3 +796,111 @@ vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m
vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
return vnot(mask, maskedoff, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vnot_tu(merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
return vnot_tu(merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
return vnot_ta(op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) {
return vnot_ta(op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vnot_tuma(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tuma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
return vnot_tuma(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vnot_tumu(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
return vnot_tumu(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
return vnot_tama(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
return vnot_tama(mask, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
return vnot_tamu(mask, merge, op1, vl);
}

// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tamu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
return vnot_tamu(mask, merge, op1, vl);
}
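
// Editorial sketch: vnot lowers to vxor with an all-ones immediate (x ^ -1),
// per the CHECK lines. Illustrative caller (name is an assumption):
vuint32mf2_t example_invert_active(vbool64_t mask, vuint32mf2_t x, size_t vl) {
  // Lowers to llvm.riscv.vxor.mask with i32 -1 and policy 3 (tama).
  return vnot_tama(mask, x, vl);
}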
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
@@ -927,3 +927,75 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
vuint64m1_t scalar, size_t vl) {
return vredand(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredand_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredand_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredand_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredand_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredand_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredand_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredand_tam(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredand_tam(mask, vector, scalar, vl);
}
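
// Editorial note: reductions always produce an LMUL=1 result (<vscale x 2 x
// i32> here) even from an mf2 source (<vscale x 1 x i32>), so the merge and
// scalar operands use the m1 type. Illustrative caller (name is an
// assumption):
vint32m1_t example_and_reduce_keep_tail(vint32m1_t merge, vint32mf2_t v,
                                        vint32m1_t scalar, size_t vl) {
  // With _tu, tail elements of the m1 result are taken from merge.
  return vredand_tu(merge, v, scalar, vl);
}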
72 changes: 72 additions & 0 deletions clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -927,3 +927,75 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
vuint64m1_t scalar, size_t vl) {
return vredmaxu(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredmax_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredmaxu_tu(merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredmax_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredmaxu_ta(vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredmax_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredmaxu_tum(mask, merge, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
return vredmax_tam(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
return vredmaxu_tam(mask, vector, scalar, vl);
}
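
// Editorial sketch: reductions carry only a tail policy (_tu/_ta/_tum/_tam);
// there is no per-element mask policy to choose because the result is a
// single element in an LMUL=1 register. Illustrative caller (name is an
// assumption):
vint32m1_t example_masked_max_reduce(vbool64_t mask, vint32mf2_t v,
                                     vint32m1_t identity, size_t vl) {
  // Elements of v where mask is 0 do not participate in the maximum.
  return vredmax_tam(mask, v, identity, vl);
}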