diff --git a/clang/include/clang/Basic/riscv_sifive_vector.td b/clang/include/clang/Basic/riscv_sifive_vector.td
index d4c22769d9b95..bb54e26641861 100644
--- a/clang/include/clang/Basic/riscv_sifive_vector.td
+++ b/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -78,29 +78,29 @@ let SupportOverloading = false in {
   defm sf_vc_iv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvKz", [0, 2, 3], UseGPR=0>;
   defm sf_vc_vv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUv", [0, 2, 3], UseGPR=0>;
   defm sf_vc_fv : RVVVCIXBuiltinSet<["si", "l"], "0KzKzUvFe", [0, 2, 3], UseGPR=0>;
-  defm sf_vc_xvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUe", [0, 1, 3], UseGPR=1>;
-  defm sf_vc_ivv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvKz", [0, 1, 3], UseGPR=0>;
-  defm sf_vc_vvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUv", [0, 1, 3], UseGPR=0>;
-  defm sf_vc_fvv : RVVVCIXBuiltinSet<["si", "l"], "0KzUvUvFe", [0, 1, 3], UseGPR=0>;
+  defm sf_vc_xvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUe", [0, 1, 2, 3], UseGPR=1>;
+  defm sf_vc_ivv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvKz", [0, 1, 2, 3], UseGPR=0>;
+  defm sf_vc_vvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUv", [0, 1, 2, 3], UseGPR=0>;
+  defm sf_vc_fvv : RVVVCIXBuiltinSet<["si", "l"], "0KzUvUvFe", [0, 1, 2, 3], UseGPR=0>;
   defm sf_vc_v_x : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzUe", [-1, 1, 2], UseGPR=1>;
   defm sf_vc_v_i : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzKz", [-1, 1, 2], UseGPR=0>;
-  defm sf_vc_v_xv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUe", [-1, 0, 2], UseGPR=1>;
-  defm sf_vc_v_iv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvKz", [-1, 0, 2], UseGPR=0>;
-  defm sf_vc_v_vv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUv", [-1, 0, 2], UseGPR=0>;
-  defm sf_vc_v_fv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvFe", [-1, 0, 2], UseGPR=0>;
-  defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUe", [-1, 0, 3], UseGPR=1>;
-  defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 3], UseGPR=0>;
-  defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 3], UseGPR=0>;
-  defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvUvFe", [-1, 0, 3], UseGPR=0>;
+  defm sf_vc_v_xv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUe", [-1, 0, 1, 2], UseGPR=1>;
+  defm sf_vc_v_iv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvKz", [-1, 0, 1, 2], UseGPR=0>;
+  defm sf_vc_v_vv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUv", [-1, 0, 1, 2], UseGPR=0>;
+  defm sf_vc_v_fv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvFe", [-1, 0, 1, 2], UseGPR=0>;
+  defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUe", [-1, 0, 1, 2, 3], UseGPR=1>;
+  defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 1, 2, 3], UseGPR=0>;
+  defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 1, 2, 3], UseGPR=0>;
+  defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvUvFe", [-1, 0, 1, 2, 3], UseGPR=0>;
   let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
     defm sf_vc_xvw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUe", [0, 1, 2, 3], UseGPR=1>;
     defm sf_vc_ivw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvKz", [0, 1, 2, 3], UseGPR=0>;
     defm sf_vc_vvw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUv", [0, 1, 2, 3], UseGPR=0>;
     defm sf_vc_fvw : RVVVCIXBuiltinSet<["si"], "0KzUwUvFe", [0, 1, 2, 3], UseGPR=0>;
-    defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUe", [-1, 0, 2, 3], UseGPR=1>;
-    defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvKz", [-1, 0, 2, 3], UseGPR=0>;
-    defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 2, 3], UseGPR=0>;
-    defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"], "UwKzUwUvFe", [-1, 0, 2, 3], UseGPR=0>;
+    defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUe", [-1, 0, 1, 2, 3], UseGPR=1>;
+    defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvKz", [-1, 0, 1, 2, 3], UseGPR=0>;
+    defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 1, 2, 3], UseGPR=0>;
+    defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"], "UwKzUwUvFe", [-1, 0, 1, 2, 3], UseGPR=0>;
   }
 }
 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
index 39704189ae45e..9e305049f22b1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv-rv64.c
@@ -45,7 +45,7 @@ void test_sf_vc_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv1i64.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xv_se_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -54,7 +54,7 @@ vuint64m1_t test_sf_vc_v_xv_se_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv2i64.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xv_se_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -63,7 +63,7 @@ vuint64m2_t test_sf_vc_v_xv_se_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv4i64.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xv_se_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -72,7 +72,7 @@ vuint64m4_t test_sf_vc_v_xv_se_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.se.nxv8i64.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
@@ -81,7 +81,7 @@ vuint64m8_t test_sf_vc_v_xv_se_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xv.nxv1i64.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_sf_vc_v_xv_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint64m1_t test_sf_vc_v_xv_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xv.nxv2i64.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_sf_vc_v_xv_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
@@ -99,7 +99,7 @@ vuint64m2_t test_sf_vc_v_xv_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xv.nxv4i64.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
@@ -108,7 +108,7 @@ vuint64m4_t test_sf_vc_v_xv_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xv.nxv8i64.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_sf_vc_v_xv_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
index f37748ac3020b..a9e1f30b73b28 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xv.c
@@ -320,12 +320,12 @@ void test_sf_vc_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -334,12 +334,12 @@ vuint8mf8_t test_sf_vc_v_vv_se_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl
 
 // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.sf.vc.v.vv.se.nxv2i8.i32.nxv2i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i32.nxv2i8.nxv2i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i64.nxv2i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i8.i64.nxv2i8.nxv2i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { @@ -348,12 +348,12 @@ vuint8mf4_t test_sf_vc_v_vv_se_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i32.nxv4i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i32.nxv4i8.nxv4i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i64.nxv4i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i8.i64.nxv4i8.nxv4i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { @@ -362,12 +362,12 @@ vuint8mf2_t test_sf_vc_v_vv_se_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i32.nxv8i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i32.nxv8i8.nxv8i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i64.nxv8i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i8.i64.nxv8i8.nxv8i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { @@ -376,12 +376,12 @@ vuint8m1_t test_sf_vc_v_vv_se_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i32.nxv16i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i32.nxv16i8.nxv16i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i64.nxv16i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i8.i64.nxv16i8.nxv16i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { @@ -390,12 +390,12 @@ vuint8m2_t test_sf_vc_v_vv_se_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i32.nxv32i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i32.nxv32i8.nxv32i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i64.nxv32i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i8.i64.nxv32i8.nxv32i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { @@ -404,12 +404,12 @@ vuint8m4_t test_sf_vc_v_vv_se_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i32.nxv64i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i32.nxv64i8.nxv64i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i64.nxv64i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv64i8.i64.nxv64i8.nxv64i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { @@ -418,12 +418,12 @@ vuint8m8_t test_sf_vc_v_vv_se_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i32.nxv1i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i32.nxv1i16.nxv1i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i64.nxv1i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i16.i64.nxv1i16.nxv1i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { @@ -432,12 +432,12 @@ vuint16mf4_t test_sf_vc_v_vv_se_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i32.nxv2i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i32.nxv2i16.nxv2i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i64.nxv2i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i16.i64.nxv2i16.nxv2i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { @@ -446,12 +446,12 @@ vuint16mf2_t test_sf_vc_v_vv_se_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i32.nxv4i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i32.nxv4i16.nxv4i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i64.nxv4i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i16.i64.nxv4i16.nxv4i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { @@ -460,12 +460,12 @@ vuint16m1_t test_sf_vc_v_vv_se_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i32.nxv8i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i32.nxv8i16.nxv8i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i64.nxv8i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i16.i64.nxv8i16.nxv8i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { @@ -474,12 +474,12 @@ vuint16m2_t test_sf_vc_v_vv_se_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i32.nxv16i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i32.nxv16i16.nxv16i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i64.nxv16i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i16.i64.nxv16i16.nxv16i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { @@ -488,12 +488,12 @@ vuint16m4_t 
test_sf_vc_v_vv_se_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i32.nxv32i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i32.nxv32i16.nxv32i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i64.nxv32i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv32i16.i64.nxv32i16.nxv32i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { @@ -502,12 +502,12 @@ vuint16m8_t test_sf_vc_v_vv_se_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i32.nxv1i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i32.nxv1i32.nxv1i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i64.nxv1i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i32.i64.nxv1i32.nxv1i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { @@ -516,12 +516,12 @@ vuint32mf2_t test_sf_vc_v_vv_se_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i32.nxv2i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i32.nxv2i32.nxv2i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i64.nxv2i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i32.i64.nxv2i32.nxv2i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { @@ -530,12 +530,12 @@ vuint32m1_t test_sf_vc_v_vv_se_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vv.se.nxv4i32.i64.nxv4i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i64.nxv4i32.nxv4i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { @@ -544,12 +544,12 @@ vuint32m2_t test_sf_vc_v_vv_se_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i32.nxv8i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i32.nxv8i32.nxv8i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i64.nxv8i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i32.i64.nxv8i32.nxv8i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { @@ -558,12 +558,12 @@ vuint32m4_t test_sf_vc_v_vv_se_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i32.nxv16i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i32.nxv16i32.nxv16i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i64.nxv16i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv16i32.i64.nxv16i32.nxv16i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { @@ -572,12 +572,12 @@ vuint32m8_t test_sf_vc_v_vv_se_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i32.nxv1i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i32.nxv1i64.nxv1i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i64.nxv1i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv1i64.i64.nxv1i64.nxv1i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { @@ -586,12 +586,12 @@ vuint64m1_t test_sf_vc_v_vv_se_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vv.se.nxv2i64.i32.nxv2i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i32.nxv2i64.nxv2i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i64.nxv2i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv2i64.i64.nxv2i64.nxv2i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { @@ -600,12 +600,12 @@ vuint64m2_t test_sf_vc_v_vv_se_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i32.nxv4i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i32.nxv4i64.nxv4i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i64.nxv4i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv4i64.i64.nxv4i64.nxv4i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { @@ -614,12 +614,12 @@ vuint64m4_t test_sf_vc_v_vv_se_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i32.nxv8i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i32.nxv8i64.nxv8i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i64.nxv8i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.se.nxv8i64.i64.nxv8i64.nxv8i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { @@ -628,12 +628,12 @@ vuint64m8_t test_sf_vc_v_vv_se_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i8.i32.nxv1i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i8.i32.nxv1i8.nxv1i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i8.i64.nxv1i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i8.i64.nxv1i8.nxv1i8.i64(i64 3, [[VS2:%.*]], 
[[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { @@ -642,12 +642,12 @@ vuint8mf8_t test_sf_vc_v_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i8.i32.nxv2i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i8.i32.nxv2i8.nxv2i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i8.i64.nxv2i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i8.i64.nxv2i8.nxv2i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { @@ -656,12 +656,12 @@ vuint8mf4_t test_sf_vc_v_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i8.i32.nxv4i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i8.i32.nxv4i8.nxv4i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i8.i64.nxv4i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i8.i64.nxv4i8.nxv4i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { @@ -670,12 +670,12 @@ vuint8mf2_t test_sf_vc_v_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i8.i32.nxv8i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i8.i32.nxv8i8.nxv8i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i8.i64.nxv8i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i8.i64.nxv8i8.nxv8i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { @@ -684,12 +684,12 @@ vuint8m1_t test_sf_vc_v_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i8.i32.nxv16i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i8.i32.nxv16i8.nxv16i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i8.i64.nxv16i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i8.i64.nxv16i8.nxv16i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { @@ -698,12 +698,12 @@ vuint8m2_t test_sf_vc_v_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i8.i32.nxv32i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i8.i32.nxv32i8.nxv32i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i8.i64.nxv32i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i8.i64.nxv32i8.nxv32i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { @@ -712,12 +712,12 @@ vuint8m4_t test_sf_vc_v_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv64i8.i32.nxv64i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv64i8.i32.nxv64i8.nxv64i8.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv64i8.i64.nxv64i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv64i8.i64.nxv64i8.nxv64i8.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { @@ -726,12 +726,12 @@ vuint8m8_t test_sf_vc_v_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i16.i32.nxv1i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i16.i32.nxv1i16.nxv1i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i16.i64.nxv1i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i16.i64.nxv1i16.nxv1i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { @@ -740,12 +740,12 @@ vuint16mf4_t test_sf_vc_v_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i16.i32.nxv2i16.i32(i32 3, 
[[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i16.i32.nxv2i16.nxv2i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i16.i64.nxv2i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i16.i64.nxv2i16.nxv2i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { @@ -754,12 +754,12 @@ vuint16mf2_t test_sf_vc_v_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i16.i32.nxv4i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i16.i32.nxv4i16.nxv4i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i16.i64.nxv4i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i16.i64.nxv4i16.nxv4i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { @@ -768,12 +768,12 @@ vuint16m1_t test_sf_vc_v_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i16.i32.nxv8i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i16.i32.nxv8i16.nxv8i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i16.i64.nxv8i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i16.i64.nxv8i16.nxv8i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { @@ -782,12 +782,12 @@ vuint16m2_t test_sf_vc_v_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i16.i32.nxv16i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i16.i32.nxv16i16.nxv16i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i16.i64.nxv16i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i16.i64.nxv16i16.nxv16i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_sf_vc_v_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { @@ -796,12 +796,12 @@ vuint16m4_t test_sf_vc_v_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i16.i32.nxv32i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i16.i32.nxv32i16.nxv32i16.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i16.i64.nxv32i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv32i16.i64.nxv32i16.nxv32i16.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { @@ -810,12 +810,12 @@ vuint16m8_t test_sf_vc_v_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i32.i32.nxv1i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i32.i32.nxv1i32.nxv1i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i32.i64.nxv1i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i32.i64.nxv1i32.nxv1i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { @@ -824,12 +824,12 @@ vuint32mf2_t test_sf_vc_v_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i32.i32.nxv2i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i32.i32.nxv2i32.nxv2i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i32.i64.nxv2i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i32.i64.nxv2i32.nxv2i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { @@ -838,12 +838,12 @@ vuint32m1_t test_sf_vc_v_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i32.i32.nxv4i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i32.i32.nxv4i32.nxv4i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i32.i64.nxv4i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i32.i64.nxv4i32.nxv4i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { @@ -852,12 +852,12 @@ vuint32m2_t test_sf_vc_v_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i32.i32.nxv8i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i32.i32.nxv8i32.nxv8i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i32.i64.nxv8i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i32.i64.nxv8i32.nxv8i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { @@ -866,12 +866,12 @@ vuint32m4_t test_sf_vc_v_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i32.i32.nxv16i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i32.i32.nxv16i32.nxv16i32.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i32.i64.nxv16i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv16i32.i64.nxv16i32.nxv16i32.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { @@ -880,12 +880,12 @@ vuint32m8_t test_sf_vc_v_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i64.i32.nxv1i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i64.i32.nxv1i64.nxv1i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i64.i64.nxv1i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv1i64.i64.nxv1i64.nxv1i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { @@ -894,12 +894,12 @@ vuint64m1_t test_sf_vc_v_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vv.nxv2i64.i32.nxv2i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i64.i32.nxv2i64.nxv2i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i64.i64.nxv2i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv2i64.i64.nxv2i64.nxv2i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { @@ -908,12 +908,12 @@ vuint64m2_t test_sf_vc_v_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i64.i32.nxv4i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i64.i32.nxv4i64.nxv4i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i64.i64.nxv4i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv4i64.i64.nxv4i64.nxv4i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { @@ -922,12 +922,12 @@ vuint64m4_t test_sf_vc_v_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i64.i32.nxv8i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i64.i32.nxv8i64.nxv8i64.i32(i32 3, [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i64.i64.nxv8i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vv.nxv8i64.i64.nxv8i64.nxv8i64.i64(i64 3, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { @@ -1188,12 +1188,12 @@ void test_sf_vc_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i32.nxv1i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i8.i64.nxv1i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
//
vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1202,12 +1202,12 @@ vuint8mf8_t test_sf_vc_v_xv_se_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i32.nxv2i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i8.i64.nxv2i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1216,12 +1216,12 @@ vuint8mf4_t test_sf_vc_v_xv_se_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i32.nxv4i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i8.i64.nxv4i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1230,12 +1230,12 @@ vuint8mf2_t test_sf_vc_v_xv_se_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i32.nxv8i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i8.i64.nxv8i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_sf_vc_v_xv_se_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1244,12 +1244,12 @@ vuint8m1_t test_sf_vc_v_xv_se_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i32.nxv16i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i8.i64.nxv16i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_sf_vc_v_xv_se_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1258,12 +1258,12 @@ vuint8m2_t test_sf_vc_v_xv_se_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i32.nxv32i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i8.i64.nxv32i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_sf_vc_v_xv_se_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1272,12 +1272,12 @@ vuint8m4_t test_sf_vc_v_xv_se_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i32.nxv64i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv64i8.i64.nxv64i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_sf_vc_v_xv_se_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1286,12 +1286,12 @@ vuint8m8_t test_sf_vc_v_xv_se_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i32.nxv1i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i16.i64.nxv1i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1300,12 +1300,12 @@ vuint16mf4_t test_sf_vc_v_xv_se_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i32.nxv2i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i16.i64.nxv2i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1314,12 +1314,12 @@ vuint16mf2_t test_sf_vc_v_xv_se_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i32.nxv4i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i16.i64.nxv4i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xv_se_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1328,12 +1328,12 @@ vuint16m1_t test_sf_vc_v_xv_se_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i32.nxv8i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i16.i64.nxv8i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xv_se_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1342,12 +1342,12 @@ vuint16m2_t test_sf_vc_v_xv_se_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i32.nxv16i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i16.i64.nxv16i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xv_se_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1356,12 +1356,12 @@ vuint16m4_t test_sf_vc_v_xv_se_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i32.nxv32i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv32i16.i64.nxv32i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xv_se_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1370,12 +1370,12 @@ vuint16m8_t test_sf_vc_v_xv_se_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.nxv1i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i64.nxv1i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1384,12 +1384,12 @@ vuint32mf2_t test_sf_vc_v_xv_se_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.nxv2i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i64.nxv2i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_xv_se_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1398,12 +1398,12 @@ vuint32m1_t test_sf_vc_v_xv_se_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.nxv4i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i64.nxv4i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1412,12 +1412,12 @@ vuint32m2_t test_sf_vc_v_xv_se_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.nxv8i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i64.nxv8i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_sf_vc_v_xv_se_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1426,12 +1426,12 @@ vuint32m4_t test_sf_vc_v_xv_se_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_se_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.nxv16i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_se_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i64.nxv16i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_sf_vc_v_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1440,12 +1440,12 @@ vuint32m8_t test_sf_vc_v_xv_se_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i8.i32.nxv1i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i8.i64.nxv1i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_sf_vc_v_xv_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1454,12 +1454,12 @@ vuint8mf8_t test_sf_vc_v_xv_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i8.i32.nxv2i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i8.i64.nxv2i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_xv_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1468,12 +1468,12 @@ vuint8mf4_t test_sf_vc_v_xv_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i8.i32.nxv4i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i8.i64.nxv4i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_xv_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1482,12 +1482,12 @@ vuint8mf2_t test_sf_vc_v_xv_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i8.i32.nxv8i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i8.i64.nxv8i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_sf_vc_v_xv_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1496,12 +1496,12 @@ vuint8m1_t test_sf_vc_v_xv_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i8.i32.nxv16i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i8.i64.nxv16i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_sf_vc_v_xv_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1510,12 +1510,12 @@ vuint8m2_t test_sf_vc_v_xv_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i8.i32.nxv32i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i8.i64.nxv32i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_sf_vc_v_xv_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1524,12 +1524,12 @@ vuint8m4_t test_sf_vc_v_xv_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv64i8.i32.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv64i8.i32.nxv64i8.i8.i32(i32 3, [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv64i8.i64.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv64i8.i64.nxv64i8.i8.i64(i64 3, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_sf_vc_v_xv_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1538,12 +1538,12 @@ vuint8m8_t test_sf_vc_v_xv_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i16.i32.nxv1i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i16.i64.nxv1i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xv_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1552,12 +1552,12 @@ vuint16mf4_t test_sf_vc_v_xv_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i16.i32.nxv2i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i16.i64.nxv2i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_xv_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { @@ -1566,12 +1566,12 @@ vuint16mf2_t test_sf_vc_v_xv_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i16.i32.nxv4i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i16.i64.nxv4i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_xv_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { @@ -1580,12 +1580,12 @@ vuint16m1_t test_sf_vc_v_xv_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i16.i32.nxv8i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i16.i64.nxv8i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_xv_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { @@ -1594,12 +1594,12 @@ vuint16m2_t test_sf_vc_v_xv_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i16.i32.nxv16i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i16.i64.nxv16i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_xv_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { @@ -1608,12 +1608,12 @@ vuint16m4_t test_sf_vc_v_xv_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u16m8( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i16.i32.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i16.i32.nxv32i16.i16.i32(i32 3, [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i16.i64.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv32i16.i64.nxv32i16.i16.i64(i64 3, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_xv_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { @@ -1622,12 +1622,12 @@ vuint16m8_t test_sf_vc_v_xv_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.nxv1i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv1i32.i64.nxv1i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_xv_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { @@ -1636,12 +1636,12 @@ vuint32mf2_t test_sf_vc_v_xv_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.nxv2i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv2i32.i64.nxv2i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_xv_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { @@ -1650,12 +1650,12 @@ vuint32m1_t test_sf_vc_v_xv_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.nxv4i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv4i32.i64.nxv4i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { @@ -1664,12 +1664,12 @@ vuint32m2_t test_sf_vc_v_xv_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.nxv8i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv8i32.i64.nxv8i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_xv_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { @@ -1678,12 +1678,12 @@ vuint32m4_t test_sf_vc_v_xv_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_xv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.nxv16i32.i32.i32(i32 3, [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_xv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i32.i64.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xv.nxv16i32.i64.nxv16i32.i32.i64(i64 3, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_xv_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { @@ -2000,12 +2000,12 @@ void test_sf_vc_iv_se_u64m8(vuint64m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i32.nxv1i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i8.i64.nxv1i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) { @@ -2014,12 +2014,12 @@ vuint8mf8_t test_sf_vc_v_iv_se_u8mf8(vuint8mf8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i32.nxv2i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.iv.se.nxv2i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i8.i64.nxv2i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) { @@ -2028,12 +2028,12 @@ vuint8mf4_t test_sf_vc_v_iv_se_u8mf4(vuint8mf4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i32.nxv4i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i8.i64.nxv4i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) { @@ -2042,12 +2042,12 @@ vuint8mf2_t test_sf_vc_v_iv_se_u8mf2(vuint8mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i32.nxv8i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i8.i64.nxv8i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_iv_se_u8m1(vuint8m1_t vs2, size_t vl) { @@ -2056,12 +2056,12 @@ vuint8m1_t test_sf_vc_v_iv_se_u8m1(vuint8m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i32.nxv16i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i8.i64.nxv16i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_iv_se_u8m2(vuint8m2_t vs2, size_t vl) { @@ -2070,12 +2070,12 @@ vuint8m2_t test_sf_vc_v_iv_se_u8m2(vuint8m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i32.nxv32i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_sf_vc_v_iv_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i8.i64.nxv32i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_iv_se_u8m4(vuint8m4_t vs2, size_t vl) { @@ -2084,12 +2084,12 @@ vuint8m4_t test_sf_vc_v_iv_se_u8m4(vuint8m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i32.nxv64i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv64i8.i64.nxv64i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_iv_se_u8m8(vuint8m8_t vs2, size_t vl) { @@ -2098,12 +2098,12 @@ vuint8m8_t test_sf_vc_v_iv_se_u8m8(vuint8m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i32.nxv1i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i16.i64.nxv1i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) { @@ -2112,12 +2112,12 @@ vuint16mf4_t test_sf_vc_v_iv_se_u16mf4(vuint16mf4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i32.nxv2i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i16.i64.nxv2i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) { @@ -2126,12 +2126,12 @@ vuint16mf2_t test_sf_vc_v_iv_se_u16mf2(vuint16mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.iv.se.nxv4i16.i32.nxv4i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i16.i64.nxv4i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_iv_se_u16m1(vuint16m1_t vs2, size_t vl) { @@ -2140,12 +2140,12 @@ vuint16m1_t test_sf_vc_v_iv_se_u16m1(vuint16m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i32.nxv8i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i16.i64.nxv8i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_iv_se_u16m2(vuint16m2_t vs2, size_t vl) { @@ -2154,12 +2154,12 @@ vuint16m2_t test_sf_vc_v_iv_se_u16m2(vuint16m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i32.nxv16i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i16.i64.nxv16i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_iv_se_u16m4(vuint16m4_t vs2, size_t vl) { @@ -2168,12 +2168,12 @@ vuint16m4_t test_sf_vc_v_iv_se_u16m4(vuint16m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i32.nxv32i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv32i16.i64.nxv32i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_iv_se_u16m8(vuint16m8_t vs2, size_t vl) { @@ -2182,12 +2182,12 @@ vuint16m8_t test_sf_vc_v_iv_se_u16m8(vuint16m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i32.nxv1i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i32.i64.nxv1i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) { @@ -2196,12 +2196,12 @@ vuint32mf2_t test_sf_vc_v_iv_se_u32mf2(vuint32mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i32.nxv2i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i32.i64.nxv2i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_iv_se_u32m1(vuint32m1_t vs2, size_t vl) { @@ -2210,12 +2210,12 @@ vuint32m1_t test_sf_vc_v_iv_se_u32m1(vuint32m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.nxv4i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i64.nxv4i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) { @@ -2224,12 +2224,12 @@ vuint32m2_t test_sf_vc_v_iv_se_u32m2(vuint32m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i32.nxv8i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i32.i64.nxv8i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_iv_se_u32m4(vuint32m4_t vs2, size_t vl) { @@ -2238,12 +2238,12 @@ vuint32m4_t 
test_sf_vc_v_iv_se_u32m4(vuint32m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i32.nxv16i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv16i32.i64.nxv16i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_iv_se_u32m8(vuint32m8_t vs2, size_t vl) { @@ -2252,12 +2252,12 @@ vuint32m8_t test_sf_vc_v_iv_se_u32m8(vuint32m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i32.nxv1i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv1i64.i64.nxv1i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_iv_se_u64m1(vuint64m1_t vs2, size_t vl) { @@ -2266,12 +2266,12 @@ vuint64m1_t test_sf_vc_v_iv_se_u64m1(vuint64m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i32.nxv2i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv2i64.i64.nxv2i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_iv_se_u64m2(vuint64m2_t vs2, size_t vl) { @@ -2280,12 +2280,12 @@ vuint64m2_t test_sf_vc_v_iv_se_u64m2(vuint64m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i32.nxv4i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv4i64.i64.nxv4i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_iv_se_u64m4(vuint64m4_t vs2, size_t vl) { @@ -2294,12 +2294,12 @@ vuint64m4_t test_sf_vc_v_iv_se_u64m4(vuint64m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i32.nxv8i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.se.nxv8i64.i64.nxv8i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_iv_se_u64m8(vuint64m8_t vs2, size_t vl) { @@ -2308,12 +2308,12 @@ vuint64m8_t test_sf_vc_v_iv_se_u64m8(vuint64m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i8.i32.nxv1i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i8.i64.nxv1i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_iv_u8mf8(vuint8mf8_t vs2, size_t vl) { @@ -2322,12 +2322,12 @@ vuint8mf8_t test_sf_vc_v_iv_u8mf8(vuint8mf8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i8.i32.nxv2i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i8.i64.nxv2i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_iv_u8mf4(vuint8mf4_t vs2, size_t vl) { @@ -2336,12 +2336,12 @@ vuint8mf4_t test_sf_vc_v_iv_u8mf4(vuint8mf4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i8.i32.nxv4i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i8.i64.nxv4i8.i64.i64(i64 
3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_iv_u8mf2(vuint8mf2_t vs2, size_t vl) { @@ -2350,12 +2350,12 @@ vuint8mf2_t test_sf_vc_v_iv_u8mf2(vuint8mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i8.i32.nxv8i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i8.i64.nxv8i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_iv_u8m1(vuint8m1_t vs2, size_t vl) { @@ -2364,12 +2364,12 @@ vuint8m1_t test_sf_vc_v_iv_u8m1(vuint8m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i8.i32.nxv16i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i8.i64.nxv16i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_iv_u8m2(vuint8m2_t vs2, size_t vl) { @@ -2378,12 +2378,12 @@ vuint8m2_t test_sf_vc_v_iv_u8m2(vuint8m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i8.i32.nxv32i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i8.i64.nxv32i8.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_iv_u8m4(vuint8m4_t vs2, size_t vl) { @@ -2392,12 +2392,12 @@ vuint8m4_t test_sf_vc_v_iv_u8m4(vuint8m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv64i8.i32.nxv64i8.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv64i8.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv64i8.i64.nxv64i8.i64.i64(i64 3, 
[[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_iv_u8m8(vuint8m8_t vs2, size_t vl) { @@ -2406,12 +2406,12 @@ vuint8m8_t test_sf_vc_v_iv_u8m8(vuint8m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i16.i32.nxv1i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i16.i64.nxv1i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_iv_u16mf4(vuint16mf4_t vs2, size_t vl) { @@ -2420,12 +2420,12 @@ vuint16mf4_t test_sf_vc_v_iv_u16mf4(vuint16mf4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i16.i32.nxv2i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i16.i64.nxv2i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_iv_u16mf2(vuint16mf2_t vs2, size_t vl) { @@ -2434,12 +2434,12 @@ vuint16mf2_t test_sf_vc_v_iv_u16mf2(vuint16mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i16.i32.nxv4i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i16.i64.nxv4i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_iv_u16m1(vuint16m1_t vs2, size_t vl) { @@ -2448,12 +2448,12 @@ vuint16m1_t test_sf_vc_v_iv_u16m1(vuint16m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i16.i32.nxv8i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.iv.nxv8i16.i64.nxv8i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_iv_u16m2(vuint16m2_t vs2, size_t vl) { @@ -2462,12 +2462,12 @@ vuint16m2_t test_sf_vc_v_iv_u16m2(vuint16m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i16.i32.nxv16i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i16.i64.nxv16i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_iv_u16m4(vuint16m4_t vs2, size_t vl) { @@ -2476,12 +2476,12 @@ vuint16m4_t test_sf_vc_v_iv_u16m4(vuint16m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i16.i32.nxv32i16.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i16.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv32i16.i64.nxv32i16.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_iv_u16m8(vuint16m8_t vs2, size_t vl) { @@ -2490,12 +2490,12 @@ vuint16m8_t test_sf_vc_v_iv_u16m8(vuint16m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i32.i32.nxv1i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i32.i64.nxv1i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_iv_u32mf2(vuint32mf2_t vs2, size_t vl) { @@ -2504,12 +2504,12 @@ vuint32mf2_t test_sf_vc_v_iv_u32mf2(vuint32mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i32.i32.nxv2i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i32.i64.nxv2i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_iv_u32m1(vuint32m1_t vs2, size_t vl) { @@ -2518,12 +2518,12 @@ vuint32m1_t test_sf_vc_v_iv_u32m1(vuint32m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i32.i32.nxv4i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i32.i64.nxv4i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) { @@ -2532,12 +2532,12 @@ vuint32m2_t test_sf_vc_v_iv_u32m2(vuint32m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i32.i32.nxv8i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i32.i64.nxv8i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_iv_u32m4(vuint32m4_t vs2, size_t vl) { @@ -2546,12 +2546,12 @@ vuint32m4_t test_sf_vc_v_iv_u32m4(vuint32m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i32.i32.nxv16i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i32.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv16i32.i64.nxv16i32.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_iv_u32m8(vuint32m8_t vs2, size_t vl) { @@ -2560,12 +2560,12 @@ vuint32m8_t test_sf_vc_v_iv_u32m8(vuint32m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i64.i32.nxv1i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.iv.nxv1i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv1i64.i64.nxv1i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_iv_u64m1(vuint64m1_t vs2, size_t vl) { @@ -2574,12 +2574,12 @@ vuint64m1_t test_sf_vc_v_iv_u64m1(vuint64m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i64.i32.nxv2i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv2i64.i64.nxv2i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_iv_u64m2(vuint64m2_t vs2, size_t vl) { @@ -2588,12 +2588,12 @@ vuint64m2_t test_sf_vc_v_iv_u64m2(vuint64m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i64.i32.nxv4i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv4i64.i64.nxv4i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_iv_u64m4(vuint64m4_t vs2, size_t vl) { @@ -2602,12 +2602,12 @@ vuint64m4_t test_sf_vc_v_iv_u64m4(vuint64m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_iv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i64.i32.nxv8i64.i32.i32(i32 3, [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_iv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i64.i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.iv.nxv8i64.i64.nxv8i64.i64.i64(i64 3, [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_iv_u64m8(vuint64m8_t vs2, size_t vl) { @@ -2826,12 +2826,12 @@ void test_sf_vc_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i32.nxv1i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_sf_vc_v_fv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i16.i64.nxv1i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -2840,12 +2840,12 @@ vuint16mf4_t test_sf_vc_v_fv_se_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i32.nxv2i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i16.i64.nxv2i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_fv_se_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) { @@ -2854,12 +2854,12 @@ vuint16mf2_t test_sf_vc_v_fv_se_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i32.nxv4i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i16.i64.nxv4i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_fv_se_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) { @@ -2868,12 +2868,12 @@ vuint16m1_t test_sf_vc_v_fv_se_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i32.nxv8i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i16.i64.nxv8i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_fv_se_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) { @@ -2882,12 +2882,12 @@ vuint16m2_t test_sf_vc_v_fv_se_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) { // 
CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i32.nxv16i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i16.i64.nxv16i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_fv_se_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) { @@ -2896,12 +2896,12 @@ vuint16m4_t test_sf_vc_v_fv_se_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i32.nxv32i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv32i16.i64.nxv32i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_fv_se_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) { @@ -2910,12 +2910,12 @@ vuint16m8_t test_sf_vc_v_fv_se_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i32.nxv1i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i32.i64.nxv1i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_fv_se_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) { @@ -2924,12 +2924,12 @@ vuint32mf2_t test_sf_vc_v_fv_se_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i32.nxv2i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i64.f32.i64(i64 1, 
[[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i32.i64.nxv2i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_fv_se_u32m1(vuint32m1_t vs2, float fs1, size_t vl) { @@ -2938,12 +2938,12 @@ vuint32m1_t test_sf_vc_v_fv_se_u32m1(vuint32m1_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.nxv4i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i64.nxv4i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_fv_se_u32m2(vuint32m2_t vs2, float fs1, size_t vl) { @@ -2952,12 +2952,12 @@ vuint32m2_t test_sf_vc_v_fv_se_u32m2(vuint32m2_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i32.nxv8i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i32.i64.nxv8i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_fv_se_u32m4(vuint32m4_t vs2, float fs1, size_t vl) { @@ -2966,12 +2966,12 @@ vuint32m4_t test_sf_vc_v_fv_se_u32m4(vuint32m4_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i32.nxv16i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv16i32.i64.nxv16i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_fv_se_u32m8(vuint32m8_t vs2, float fs1, size_t vl) { @@ -2980,12 +2980,12 @@ vuint32m8_t test_sf_vc_v_fv_se_u32m8(vuint32m8_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i32.f64.i32(i32 1, 
[[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i32.nxv1i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv1i64.i64.nxv1i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_fv_se_u64m1(vuint64m1_t vs2, double fs1, size_t vl) { @@ -2994,12 +2994,12 @@ vuint64m1_t test_sf_vc_v_fv_se_u64m1(vuint64m1_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i32.nxv2i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv2i64.i64.nxv2i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_fv_se_u64m2(vuint64m2_t vs2, double fs1, size_t vl) { @@ -3008,12 +3008,12 @@ vuint64m2_t test_sf_vc_v_fv_se_u64m2(vuint64m2_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i32.nxv4i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv4i64.i64.nxv4i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_fv_se_u64m4(vuint64m4_t vs2, double fs1, size_t vl) { @@ -3022,12 +3022,12 @@ vuint64m4_t test_sf_vc_v_fv_se_u64m4(vuint64m4_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i32.nxv8i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.se.nxv8i64.i64.nxv8i64.f64.i64(i64 1, [[VS2:%.*]], double 
[[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) { @@ -3036,12 +3036,12 @@ vuint64m8_t test_sf_vc_v_fv_se_u64m8(vuint64m8_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i16.i32.nxv1i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i16.i64.nxv1i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -3050,12 +3050,12 @@ vuint16mf4_t test_sf_vc_v_fv_u16mf4(vuint16mf4_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i16.i32.nxv2i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i16.i64.nxv2i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_fv_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) { @@ -3064,12 +3064,12 @@ vuint16mf2_t test_sf_vc_v_fv_u16mf2(vuint16mf2_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i16.i32.nxv4i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i16.i64.nxv4i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_fv_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) { @@ -3078,12 +3078,12 @@ vuint16m1_t test_sf_vc_v_fv_u16m1(vuint16m1_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i16.i32.nxv8i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i16.i64.nxv8i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_fv_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) { @@ -3092,12 +3092,12 @@ vuint16m2_t test_sf_vc_v_fv_u16m2(vuint16m2_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i16.i32.nxv16i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i16.i64.nxv16i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_fv_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) { @@ -3106,12 +3106,12 @@ vuint16m4_t test_sf_vc_v_fv_u16m4(vuint16m4_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv32i16.i32.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv32i16.i32.nxv32i16.f16.i32(i32 1, [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv32i16.i64.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv32i16.i64.nxv32i16.f16.i64(i64 1, [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_fv_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) { @@ -3120,12 +3120,12 @@ vuint16m8_t test_sf_vc_v_fv_u16m8(vuint16m8_t vs2, _Float16 fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i32.i32.nxv1i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i32.i64.nxv1i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_fv_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) { @@ -3134,12 +3134,12 @@ vuint32mf2_t test_sf_vc_v_fv_u32mf2(vuint32mf2_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m1( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i32.i32.nxv2i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i32.i64.nxv2i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_fv_u32m1(vuint32m1_t vs2, float fs1, size_t vl) { @@ -3148,12 +3148,12 @@ vuint32m1_t test_sf_vc_v_fv_u32m1(vuint32m1_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i32.i32.nxv4i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i32.i64.nxv4i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_fv_u32m2(vuint32m2_t vs2, float fs1, size_t vl) { @@ -3162,12 +3162,12 @@ vuint32m2_t test_sf_vc_v_fv_u32m2(vuint32m2_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i32.i32.nxv8i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i32.i64.nxv8i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_fv_u32m4(vuint32m4_t vs2, float fs1, size_t vl) { @@ -3176,12 +3176,12 @@ vuint32m4_t test_sf_vc_v_fv_u32m4(vuint32m4_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i32.i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i32.i32.nxv16i32.f32.i32(i32 1, [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i32.i64.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv16i32.i64.nxv16i32.f32.i64(i64 1, [[VS2:%.*]], float [[FS1:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_fv_u32m8(vuint32m8_t vs2, float fs1, size_t vl) { @@ -3190,12 +3190,12 @@ vuint32m8_t test_sf_vc_v_fv_u32m8(vuint32m8_t vs2, float fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i64.i32.nxv1i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv1i64.i64.nxv1i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_fv_u64m1(vuint64m1_t vs2, double fs1, size_t vl) { @@ -3204,12 +3204,12 @@ vuint64m1_t test_sf_vc_v_fv_u64m1(vuint64m1_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i64.i32.nxv2i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv2i64.i64.nxv2i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_fv_u64m2(vuint64m2_t vs2, double fs1, size_t vl) { @@ -3218,12 +3218,12 @@ vuint64m2_t test_sf_vc_v_fv_u64m2(vuint64m2_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i64.i32.nxv4i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv4i64.i64.nxv4i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, double fs1, size_t vl) { @@ -3232,12 +3232,12 @@ vuint64m4_t test_sf_vc_v_fv_u64m4(vuint64m4_t vs2, double fs1, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_fv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i64.i32.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i64.i32.nxv8i64.f64.i32(i32 1, [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_sf_vc_v_fv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i64.i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fv.nxv8i64.i64.nxv8i64.f64.i64(i64 1, [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_fv_u64m8(vuint64m8_t vs2, double fs1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c index 8b0c73776948e..eac2ba32d5b30 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv-rv64.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { @@ -17,7 +17,7 @@ void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { @@ -26,7 +26,7 @@ void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { @@ -35,7 +35,7 @@ void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { @@ -44,7 +44,7 @@ void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { @@ -53,7 +53,7 @@ vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { @@ -62,7 +62,7 @@ vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { @@ -71,7 +71,7 @@ vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { @@ -80,7 +80,7 @@ vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { @@ -89,7 +89,7 @@ vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { @@ -98,7 +98,7 @@ vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { @@ -107,7 +107,7 @@ vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1 // CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_xvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c index 4efd7da81bac4..10169692072e2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c @@ -11,12 +11,12 @@ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i8.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { @@ -25,12 +25,12 @@ void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i8.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { @@ -39,12 +39,12 @@ void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf2( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i8.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { @@ -53,12 +53,12 @@ void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i8.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { @@ -67,12 +67,12 @@ void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i8.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { @@ -81,12 +81,12 @@ void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.sf.vc.vvv.se.i64.nxv32i8.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { @@ -95,12 +95,12 @@ void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { @@ -109,12 +109,12 @@ void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i16.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { @@ -123,12 +123,12 @@ void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i16.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { @@ -137,12 +137,12 @@ void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void 
@llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i16.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { @@ -151,12 +151,12 @@ void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i16.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { @@ -165,12 +165,12 @@ void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i16.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { @@ -179,12 +179,12 @@ void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void 
test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { @@ -193,12 +193,12 @@ void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i32.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { @@ -207,12 +207,12 @@ void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i32.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { @@ -221,12 +221,12 @@ void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i32.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { @@ -235,12 +235,12 @@ void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // 
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i32.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { @@ -249,12 +249,12 @@ void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { @@ -263,12 +263,12 @@ void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.nxv1i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i64.nxv1i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i64.nxv1i64.nxv1i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { @@ -277,12 +277,12 @@ void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.nxv2i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i64.nxv2i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i64.nxv2i64.nxv2i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { @@ -291,12 +291,12 @@ void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t 
vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.nxv4i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i64.nxv4i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i64.nxv4i64.nxv4i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { @@ -305,12 +305,12 @@ void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { @@ -319,12 +319,12 @@ void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, s // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i64.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i64.nxv1i8.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { @@ -333,12 +333,12 @@ vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i64.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i64.nxv2i8.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { @@ -347,12 +347,12 @@ vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i64.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i64.nxv4i8.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { @@ -361,12 +361,12 @@ vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i64.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i64.nxv8i8.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { @@ -375,12 +375,12 @@ vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i64.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i64.nxv16i8.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { @@ -389,12 +389,12 @@ vuint8m2_t test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i64.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i64.nxv32i8.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { @@ -403,12 +403,12 @@ vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.nxv64i8.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.nxv64i8.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { @@ -417,12 +417,12 @@ vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i64.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i64.nxv1i16.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { @@ -431,12 +431,12 @@ vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i64.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i64.nxv2i16.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { @@ -445,12 +445,12 @@ vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i64.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i64.nxv4i16.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { @@ -459,12 +459,12 @@ vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i64.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i64.nxv8i16.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { @@ -473,12 +473,12 @@ vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i64.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i64.nxv16i16.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { @@ -487,12 +487,12 @@ vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.nxv32i16.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.nxv32i16.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { @@ -501,12 +501,12 @@ vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i64.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i64.nxv1i32.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { @@ -515,12 +515,12 @@ vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i64.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i64.nxv2i32.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { @@ -529,12 +529,12 @@ vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i64.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i64.nxv4i32.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { @@ -543,12 +543,12 @@ vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i64.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i64.nxv8i32.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { @@ -557,12 +557,12 @@ vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.nxv16i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.nxv16i32.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { @@ -571,12 +571,12 @@ vuint32m8_t 
test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.nxv1i64.nxv1i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i64.nxv1i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i64.nxv1i64.nxv1i64.nxv1i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { @@ -585,12 +585,12 @@ vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.nxv2i64.nxv2i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i64.nxv2i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i64.nxv2i64.nxv2i64.nxv2i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { @@ -599,12 +599,12 @@ vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.nxv4i64.nxv4i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i64.nxv4i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i64.nxv4i64.nxv4i64.nxv4i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { @@ -613,12 +613,12 @@ vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.nxv8i64.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.nxv8i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { @@ -627,12 +627,12 @@ vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i8.i64.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i8.i64.nxv1i8.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { @@ -641,12 +641,12 @@ vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i8.i64.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i8.i64.nxv2i8.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { @@ -655,12 +655,12 @@ vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i8.i64.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.sf.vc.v.vvv.nxv4i8.i64.nxv4i8.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { @@ -669,12 +669,12 @@ vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i8.i64.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i8.i64.nxv8i8.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { @@ -683,12 +683,12 @@ vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i8.i64.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i8.i64.nxv16i8.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { @@ -697,12 +697,12 @@ vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i8.i64.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i8.i64.nxv32i8.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { @@ -711,12 +711,12 @@ vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m8( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.nxv64i8.nxv64i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.nxv64i8.nxv64i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { @@ -725,12 +725,12 @@ vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i16.i64.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i16.i64.nxv1i16.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { @@ -739,12 +739,12 @@ vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16m // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i16.i64.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i16.i64.nxv2i16.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { @@ -753,12 +753,12 @@ vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16m // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i16.i64.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i16.i64.nxv4i16.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { @@ -767,12 +767,12 @@ vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i16.i64.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i16.i64.nxv8i16.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { @@ -781,12 +781,12 @@ vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i16.i64.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i16.i64.nxv16i16.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { @@ -795,12 +795,12 @@ vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.nxv32i16.nxv32i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.nxv32i16.nxv32i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { @@ -809,12 +809,12 @@ vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i32.i64.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i32.i64.nxv1i32.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { @@ -823,12 +823,12 @@ vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32m // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i32.i64.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i32.i64.nxv2i32.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { @@ -837,12 +837,12 @@ vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i32.i64.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i32.i64.nxv4i32.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { @@ -851,12 +851,12 @@ vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], 
[[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i32.i64.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i32.i64.nxv8i32.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { @@ -865,12 +865,12 @@ vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.nxv16i32.nxv16i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.nxv16i32.nxv16i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { @@ -879,12 +879,12 @@ vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.nxv1i64.nxv1i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i64.i64.nxv1i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv1i64.i64.nxv1i64.nxv1i64.nxv1i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { @@ -893,12 +893,12 @@ vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.nxv2i64.nxv2i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vvv.nxv2i64.i64.nxv2i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv2i64.i64.nxv2i64.nxv2i64.nxv2i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { @@ -907,12 +907,12 @@ vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.nxv4i64.nxv4i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i64.i64.nxv4i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv4i64.i64.nxv4i64.nxv4i64.nxv4i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { @@ -921,12 +921,12 @@ vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.nxv8i64.nxv8i64.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.nxv8i64.nxv8i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { @@ -935,12 +935,12 @@ vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i8.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { @@ -949,12 +949,12 @@ void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, 
vuint8mf8_t vs2, uint8_t rs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i8.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { @@ -963,12 +963,12 @@ void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i8.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { @@ -977,12 +977,12 @@ void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_ // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i8.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { @@ -991,12 +991,12 @@ void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i8.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { @@ -1005,12 +1005,12 @@ void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i8.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { @@ -1019,12 +1019,12 @@ void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.nxv64i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv64i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv64i8.nxv64i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { @@ -1033,12 +1033,12 @@ void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t v // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i16.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { @@ -1047,12 +1047,12 @@ void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, s // CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void 
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i16.nxv2i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1061,12 +1061,12 @@ void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, s
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.nxv4i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i16.nxv4i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1075,12 +1075,12 @@ void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.nxv8i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i16.nxv8i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1089,12 +1089,12 @@ void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.nxv16i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i16.nxv16i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1103,12 +1103,12 @@ void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.nxv32i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i16.nxv32i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1117,12 +1117,12 @@ void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i32.nxv1i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1131,12 +1131,12 @@ void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, s
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i32.nxv2i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1145,12 +1145,12 @@ void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i32.nxv4i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1159,12 +1159,12 @@ void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i32.nxv8i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1173,12 +1173,12 @@ void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i32.nxv16i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1187,12 +1187,12 @@ void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.nxv1i8.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i64.nxv1i8.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1201,12 +1201,12 @@ vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t r
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.nxv2i8.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i64.nxv2i8.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1215,12 +1215,12 @@ vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t r
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.nxv4i8.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i64.nxv4i8.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1229,12 +1229,12 @@ vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t r
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.nxv8i8.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i64.nxv8i8.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1243,12 +1243,12 @@ vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.nxv16i8.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i64.nxv16i8.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1257,12 +1257,12 @@ vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.nxv32i8.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i64.nxv32i8.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1271,12 +1271,12 @@ vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.nxv64i8.nxv64i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i64.nxv64i8.nxv64i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1285,12 +1285,12 @@ vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.nxv1i16.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i64.nxv1i16.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1299,12 +1299,12 @@ vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.nxv2i16.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i64.nxv2i16.nxv2i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1313,12 +1313,12 @@ vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.nxv4i16.nxv4i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i64.nxv4i16.nxv4i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1327,12 +1327,12 @@ vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.nxv8i16.nxv8i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i64.nxv8i16.nxv8i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1341,12 +1341,12 @@ vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.nxv16i16.nxv16i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i64.nxv16i16.nxv16i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1355,12 +1355,12 @@ vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.nxv32i16.nxv32i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i64.nxv32i16.nxv32i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1369,12 +1369,12 @@ vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i64.nxv1i32.nxv1i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1383,12 +1383,12 @@ vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint3
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i64.nxv2i32.nxv2i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1397,12 +1397,12 @@ vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i64.nxv4i32.nxv4i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1411,12 +1411,12 @@ vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i64.nxv8i32.nxv8i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1425,12 +1425,12 @@ vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i64.nxv16i32.nxv16i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1439,12 +1439,12 @@ vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.nxv1i8.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i8.i64.nxv1i8.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1453,12 +1453,12 @@ vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.nxv2i8.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i8.i64.nxv2i8.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1467,12 +1467,12 @@ vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.nxv4i8.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i8.i64.nxv4i8.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1481,12 +1481,12 @@ vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.nxv8i8.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i8.i64.nxv8i8.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1495,12 +1495,12 @@ vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, siz
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.nxv16i8.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i8.i64.nxv16i8.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1509,12 +1509,12 @@ vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, siz
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.nxv32i8.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i8.i64.nxv32i8.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1523,12 +1523,12 @@ vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, siz
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.nxv64i8.nxv64i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv64i8.i64.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv64i8.i64.nxv64i8.nxv64i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
@@ -1537,12 +1537,12 @@ vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, siz
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.nxv1i16.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i16.i64.nxv1i16.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1551,12 +1551,12 @@ vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.nxv2i16.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i16.i64.nxv2i16.nxv2i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -1565,12 +1565,12 @@ vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.nxv4i16.nxv4i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i16.i64.nxv4i16.nxv4i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -1579,12 +1579,12 @@ vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.nxv8i16.nxv8i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i16.i64.nxv8i16.nxv8i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -1593,12 +1593,12 @@ vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.nxv16i16.nxv16i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i16.i64.nxv16i16.nxv16i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1607,12 +1607,12 @@ vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.nxv32i16.nxv32i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i16.i64.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv32i16.i64.nxv32i16.nxv32i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
@@ -1621,12 +1621,12 @@ vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv1i32.i64.nxv1i32.nxv1i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1635,12 +1635,12 @@ vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv2i32.i64.nxv2i32.nxv2i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1649,12 +1649,12 @@ vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv4i32.i64.nxv4i32.nxv4i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1663,12 +1663,12 @@ vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv8i32.i64.nxv8i32.nxv8i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1677,12 +1677,12 @@ vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i32.i64.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvv.nxv16i32.i64.nxv16i32.nxv16i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
@@ -1691,12 +1691,12 @@ vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.nxv1i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i8.nxv1i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -1705,12 +1705,12 @@ void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.nxv2i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i8.nxv2i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -1719,12 +1719,12 @@ void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.nxv4i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i8.nxv4i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -1733,12 +1733,12 @@ void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.nxv8i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i8.nxv8i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
@@ -1747,12 +1747,12 @@ void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.nxv16i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i8.nxv16i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
@@ -1761,12 +1761,12 @@ void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.nxv32i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i8.nxv32i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
@@ -1775,12 +1775,12 @@ void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.nxv64i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv64i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv64i8.nxv64i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
@@ -1789,12 +1789,12 @@ void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i16.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -1803,12 +1803,12 @@ void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i16.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -1817,12 +1817,12 @@ void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i16.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
@@ -1831,12 +1831,12 @@ void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i16.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
@@ -1845,12 +1845,12 @@ void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i16.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
@@ -1859,12 +1859,12 @@ void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.nxv32i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i16.nxv32i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
@@ -1873,12 +1873,12 @@ void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
[[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i32.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { @@ -1887,12 +1887,12 @@ void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i32.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { @@ -1901,12 +1901,12 @@ void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i32.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { @@ -1915,12 +1915,12 @@ void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i32.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { @@ -1929,12 +1929,12 @@ void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i32.nxv16i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { @@ -1943,12 +1943,12 @@ void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.nxv1i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { @@ -1957,12 +1957,12 @@ void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.nxv2i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { @@ -1971,12 +1971,12 @@ void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.nxv4i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { @@ -1985,12 +1985,12 @@ void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.nxv8i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { @@ -1999,12 +1999,12 @@ void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.nxv1i8.nxv1i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i64.nxv1i8.nxv1i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { @@ -2013,12 +2013,12 @@ vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.nxv2i8.nxv2i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i64.nxv2i8.nxv2i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { @@ -2027,12 +2027,12 @@ vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.nxv4i8.nxv4i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i64.nxv4i8.nxv4i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { @@ -2041,12 +2041,12 @@ vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.nxv8i8.nxv8i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i64.nxv8i8.nxv8i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { @@ -2055,12 +2055,12 @@ vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.nxv16i8.nxv16i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i64.nxv16i8.nxv16i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { @@ -2069,12 +2069,12 @@ vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.nxv32i8.nxv32i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i64.nxv32i8.nxv32i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { @@ -2083,12 +2083,12 @@ vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.nxv64i8.nxv64i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i64.nxv64i8.nxv64i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { @@ -2097,12 +2097,12 @@ vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.nxv1i16.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i64.nxv1i16.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { @@ -2111,12 +2111,12 @@ vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_ // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.nxv2i16.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i64.nxv2i16.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { @@ -2125,12 +2125,12 @@ vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_ // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.nxv4i16.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i64.nxv4i16.nxv4i16.i64.i64(i64 3, 
[[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { @@ -2139,12 +2139,12 @@ vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.nxv8i16.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i64.nxv8i16.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { @@ -2153,12 +2153,12 @@ vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.nxv16i16.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i64.nxv16i16.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { @@ -2167,12 +2167,12 @@ vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.nxv32i16.nxv32i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i64.nxv32i16.nxv32i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { @@ -2181,12 +2181,12 @@ vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 
10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i64.nxv1i32.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { @@ -2195,12 +2195,12 @@ vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_ // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i64.nxv2i32.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { @@ -2209,12 +2209,12 @@ vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i64.nxv4i32.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { @@ -2223,12 +2223,12 @@ vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i64.nxv8i32.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { @@ -2237,12 +2237,12 @@ vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i64.nxv16i32.nxv16i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { @@ -2251,12 +2251,12 @@ vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.nxv1i64.nxv1i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { @@ -2265,12 +2265,12 @@ vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.nxv2i64.nxv2i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { @@ -2279,12 +2279,12 @@ vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.nxv4i64.nxv4i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { @@ -2293,12 +2293,12 @@ vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.nxv8i64.nxv8i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { @@ -2307,12 +2307,12 @@ vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.nxv1i8.nxv1i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i8.i64.nxv1i8.nxv1i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { @@ -2321,12 +2321,12 @@ vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.nxv2i8.nxv2i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i8.i64.nxv2i8.nxv2i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { @@ -2335,12 +2335,12 @@ vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.nxv4i8.nxv4i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i8.i64.nxv4i8.nxv4i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { @@ -2349,12 +2349,12 @@ vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.nxv8i8.nxv8i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i8.i64.nxv8i8.nxv8i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { @@ -2363,12 +2363,12 @@ vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.nxv16i8.nxv16i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i8.i64.nxv16i8.nxv16i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { @@ -2377,12 +2377,12 @@ vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.nxv32i8.nxv32i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i8.i64.nxv32i8.nxv32i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { @@ -2391,12 +2391,12 @@ vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.nxv64i8.nxv64i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv64i8.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv64i8.i64.nxv64i8.nxv64i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { @@ -2405,12 +2405,12 @@ vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.nxv1i16.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i16.i64.nxv1i16.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { @@ -2419,12 +2419,12 @@ vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t v // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.nxv2i16.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i16.i64.nxv2i16.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { @@ -2433,12 +2433,12 @@ vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t v // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.nxv4i16.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i16.i64.nxv4i16.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { @@ -2447,12 +2447,12 @@ vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.nxv8i16.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i16.i64.nxv8i16.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { @@ -2461,12 +2461,12 @@ vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.nxv16i16.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i16.i64.nxv16i16.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { @@ -2475,12 +2475,12 @@ vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.nxv32i16.nxv32i16.i32.i32(i32 3, [[VD:%.*]], 
[[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i16.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv32i16.i64.nxv32i16.nxv32i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { @@ -2489,12 +2489,12 @@ vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.nxv1i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i32.i64.nxv1i32.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { @@ -2503,12 +2503,12 @@ vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t v // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.nxv2i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i32.i64.nxv2i32.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { @@ -2517,12 +2517,12 @@ vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.nxv4i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i32.i64.nxv4i32.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t 
vs2, size_t vl) { @@ -2531,12 +2531,12 @@ vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.nxv8i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i32.i64.nxv8i32.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { @@ -2545,12 +2545,12 @@ vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.nxv16i32.nxv16i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i32.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv16i32.i64.nxv16i32.nxv16i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { @@ -2559,12 +2559,12 @@ vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.nxv1i64.nxv1i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv1i64.i64.nxv1i64.nxv1i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { @@ -2573,12 +2573,12 @@ vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.nxv2i64.nxv2i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv2i64.i64.nxv2i64.nxv2i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { @@ -2587,12 +2587,12 @@ vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.nxv4i64.nxv4i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv4i64.i64.nxv4i64.nxv4i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { @@ -2601,12 +2601,12 @@ vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.nxv8i64.nxv8i64.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i64.i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivv.nxv8i64.i64.nxv8i64.nxv8i64.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { @@ -2615,12 +2615,12 @@ vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i16.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -2629,12 +2629,12 @@ void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, 
vuint16mf4_t vs2, _Float16 fs1, s
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i16.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
@@ -2643,12 +2643,12 @@ void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, s
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i16.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
@@ -2657,12 +2657,12 @@ void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i16.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
@@ -2671,12 +2671,12 @@ void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i16.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
@@ -2685,12 +2685,12 @@ void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.nxv32i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv32i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv32i16.nxv32i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
@@ -2699,12 +2699,12 @@ void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i32.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
@@ -2713,12 +2713,12 @@ void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i32.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
@@ -2727,12 +2727,12 @@ void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i32.nxv4i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2741,12 +2741,12 @@ void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.nxv8i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i32.nxv8i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
@@ -2755,12 +2755,12 @@ void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.nxv16i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i32.nxv16i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
@@ -2769,12 +2769,12 @@ void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.nxv1i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i64.nxv1i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
@@ -2783,12 +2783,12 @@ void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.nxv2i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i64.nxv2i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
@@ -2797,12 +2797,12 @@ void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.nxv4i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i64.nxv4i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
@@ -2811,12 +2811,12 @@ void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.nxv8i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i64.nxv8i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
@@ -2825,12 +2825,12 @@ void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t
// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.nxv1i16.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i64.nxv1i16.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -2839,12 +2839,12 @@ vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Floa // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.nxv2i16.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i64.nxv2i16.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) { @@ -2853,12 +2853,12 @@ vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Floa // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.nxv4i16.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i64.nxv4i16.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) { @@ -2867,12 +2867,12 @@ vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.nxv8i16.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i64.nxv8i16.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) { @@ -2881,12 +2881,12 @@ vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.nxv16i16.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i64.nxv16i16.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) { @@ -2895,12 +2895,12 @@ vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.nxv32i16.nxv32i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i64.nxv32i16.nxv32i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) { @@ -2909,12 +2909,12 @@ vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.nxv1i32.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i64.nxv1i32.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) { @@ -2923,12 +2923,12 @@ vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.nxv2i32.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i64.nxv2i32.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) { @@ -2937,12 +2937,12 @@ vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.nxv4i32.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i64.nxv4i32.nxv4i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) { @@ -2951,12 +2951,12 @@ vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.nxv8i32.nxv8i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i64.nxv8i32.nxv8i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) { @@ -2965,12 +2965,12 @@ vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m8( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.nxv16i32.nxv16i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i64.nxv16i32.nxv16i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) { @@ -2979,12 +2979,12 @@ vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.nxv1i64.nxv1i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i64.nxv1i64.nxv1i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) { @@ -2993,12 +2993,12 @@ vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.nxv2i64.nxv2i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i64.nxv2i64.nxv2i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) { @@ -3007,12 +3007,12 @@ vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.nxv4i64.nxv4i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], 
i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i64.nxv4i64.nxv4i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) { @@ -3021,12 +3021,12 @@ vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.nxv8i64.nxv8i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i64.nxv8i64.nxv8i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) { @@ -3035,12 +3035,12 @@ vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.nxv1i16.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i16.i64.nxv1i16.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -3049,12 +3049,12 @@ vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.nxv2i16.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.fvv.nxv2i16.i64.nxv2i16.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) { @@ -3063,12 +3063,12 @@ vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.nxv4i16.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i16.i64.nxv4i16.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) { @@ -3077,12 +3077,12 @@ vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.nxv8i16.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i16.i64.nxv8i16.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) { @@ -3091,12 +3091,12 @@ vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.nxv16i16.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i16.i64.nxv16i16.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) { @@ -3105,12 +3105,12 @@ vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1 // 
CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.nxv32i16.nxv32i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv32i16.i64.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv32i16.i64.nxv32i16.nxv32i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) { @@ -3119,12 +3119,12 @@ vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.nxv1i32.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i32.i64.nxv1i32.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) { @@ -3133,12 +3133,12 @@ vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.nxv2i32.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i32.i64.nxv2i32.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) { @@ -3147,12 +3147,12 @@ vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, s // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.nxv4i32.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], 
i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i32.i64.nxv4i32.nxv4i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) { @@ -3161,12 +3161,12 @@ vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, s // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.nxv8i32.nxv8i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i32.i64.nxv8i32.nxv8i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) { @@ -3175,12 +3175,12 @@ vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, s // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.nxv16i32.nxv16i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i32.i64.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv16i32.i64.nxv16i32.nxv16i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) { @@ -3189,12 +3189,12 @@ vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, s // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.nxv1i64.nxv1i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv1i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.fvv.nxv1i64.i64.nxv1i64.nxv1i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
@@ -3203,12 +3203,12 @@ vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.nxv2i64.nxv2i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv2i64.i64.nxv2i64.nxv2i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
@@ -3217,12 +3217,12 @@ vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.nxv4i64.nxv4i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv4i64.i64.nxv4i64.nxv4i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
@@ -3231,12 +3231,12 @@ vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.nxv8i64.nxv8i64.f64.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i64.i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvv.nxv8i64.i64.nxv8i64.nxv8i64.f64.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_sf_vc_v_fvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
index 23ee2b7bb0f4e..727043077424b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvw.c
@@ -221,12 +221,12 @@ void test_sf_vc_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, s
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i64.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i64.nxv1i16.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -235,12 +235,12 @@ vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8m
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i64.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i64.nxv2i16.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -249,12 +249,12 @@ vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8m
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i64.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i64.nxv4i16.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { @@ -263,12 +263,12 @@ vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i64.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i64.nxv8i16.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { @@ -277,12 +277,12 @@ vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i64.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i64.nxv16i16.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { @@ -291,12 +291,12 @@ vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i64.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i64.nxv32i16.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { @@ -305,12 +305,12 @@ vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i64.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i64.nxv1i32.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { @@ -319,12 +319,12 @@ vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i64.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i64.nxv2i32.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { @@ -333,12 +333,12 @@ vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i64.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i64.nxv4i32.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { @@ -347,12 +347,12 @@ vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i64.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i64.nxv8i32.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { @@ -361,12 +361,12 @@ vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i64.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i64.nxv16i32.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { @@ -375,12 +375,12 @@ vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i64.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i64.nxv1i64.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { @@ -389,12 +389,12 @@ vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i64.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i64.nxv2i64.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { @@ -403,12 +403,12 @@ vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i64.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i64.nxv4i64.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { @@ -417,12 +417,12 @@ vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i64.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i64.nxv8i64.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { @@ -431,12 +431,12 @@ vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i16.i64.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i16.i64.nxv1i16.nxv1i8.nxv1i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t 
vl) {
@@ -445,12 +445,12 @@ vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i16.i64.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i16.i64.nxv2i16.nxv2i8.nxv2i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -459,12 +459,12 @@ vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i16.i64.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i16.i64.nxv4i16.nxv4i8.nxv4i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -473,12 +473,12 @@ vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i16.i64.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i16.i64.nxv8i16.nxv8i8.nxv8i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -487,12 +487,12 @@ vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i16.i64.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i16.i64.nxv16i16.nxv16i8.nxv16i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -501,12 +501,12 @@ vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv32i16.i64.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv32i16.i64.nxv32i16.nxv32i8.nxv32i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -515,12 +515,12 @@ vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i32.i64.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i32.i64.nxv1i32.nxv1i16.nxv1i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -529,12 +529,12 @@ vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16m
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i32.i64.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i32.i64.nxv2i32.nxv2i16.nxv2i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -543,12 +543,12 @@ vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i32.i64.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i32.i64.nxv4i32.nxv4i16.nxv4i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -557,12 +557,12 @@ vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i32.i64.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i32.i64.nxv8i32.nxv8i16.nxv8i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -571,12 +571,12 @@ vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i32.i64.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv16i32.i64.nxv16i32.nxv16i16.nxv16i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -585,12 +585,12 @@ vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i64.i64.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv1i64.i64.nxv1i64.nxv1i32.nxv1i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
@@ -599,12 +599,12 @@ vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i64.i64.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv2i64.i64.nxv2i64.nxv2i32.nxv2i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
@@ -613,12 +613,12 @@ vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i64.i64.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv4i64.i64.nxv4i64.nxv4i32.nxv4i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
@@ -627,12 +627,12 @@ vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t
// CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i64.i64.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.vvw.nxv8i64.i64.nxv8i64.nxv8i32.nxv8i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_sf_vc_v_vvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
@@ -851,12 +851,12 @@ void test_sf_vc_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i16.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i64.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i64.nxv1i16.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -865,12 +865,12 @@ vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i16.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i64.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i64.nxv2i16.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -879,12 +879,12 @@ vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i16.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i64.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i64.nxv4i16.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -893,12 +893,12 @@ vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t r
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i16.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i64.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i64.nxv8i16.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -907,12 +907,12 @@ vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i16.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i64.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i64.nxv16i16.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -921,12 +921,12 @@ vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i16.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i64.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i64.nxv32i16.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -935,12 +935,12 @@ vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i32.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i64.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i64.nxv1i32.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -949,12 +949,12 @@ vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint1
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i32.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i64.nxv2i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i64.nxv2i32.nxv2i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
@@ -963,12 +963,12 @@ vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i32.nxv4i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i64.nxv4i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i64.nxv4i32.nxv4i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
@@ -977,12 +977,12 @@ vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i32.nxv8i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i64.nxv8i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i64.nxv8i32.nxv8i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
@@ -991,12 +991,12 @@ vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i32.nxv16i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i64.nxv16i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i64.nxv16i32.nxv16i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
@@ -1005,12 +1005,12 @@ vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i64.nxv1i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i64.nxv1i64.nxv1i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
@@ -1019,12 +1019,12 @@ vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i64.nxv2i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i64.nxv2i64.nxv2i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
@@ -1033,12 +1033,12 @@ vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i64.nxv4i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i64.nxv4i64.nxv4i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
@@ -1047,12 +1047,12 @@ vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i64.nxv8i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i64.nxv8i64.nxv8i32.i32.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
@@ -1061,12 +1061,12 @@ vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i16.nxv1i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i16.i64.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i16.i64.nxv1i16.nxv1i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
@@ -1075,12 +1075,12 @@ vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i16.nxv2i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv2i16.i64.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv2i16.i64.nxv2i16.nxv2i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
@@ -1089,12 +1089,12 @@ vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i16.nxv4i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv4i16.i64.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv4i16.i64.nxv4i16.nxv4i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
@@ -1103,12 +1103,12 @@ vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1,
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i16.nxv8i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv8i16.i64.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv8i16.i64.nxv8i16.nxv8i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
@@ -1117,12 +1117,12 @@ vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, s
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i16.nxv16i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv16i16.i64.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv16i16.i64.nxv16i16.nxv16i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
@@ -1131,12 +1131,12 @@ vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, s
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i16.nxv32i8.i8.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv32i16.i64.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv32i16.i64.nxv32i16.nxv32i8.i8.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
@@ -1145,12 +1145,12 @@ vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, s
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i32.nxv1i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i32.i64.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv1i32.i64.nxv1i32.nxv1i16.i16.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
@@ -1159,12 +1159,12 @@ vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t
// CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i32.nxv2i16.i16.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
@@ -1565,12 +1565,12 @@ vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl)
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i32.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i64.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i64.nxv1i32.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -1579,12 +1579,12 @@ vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i32.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i64.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i64.nxv2i32.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -1593,12 +1593,12 @@ vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i32.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i64.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i64.nxv4i32.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
@@ -1607,12 +1607,12 @@ vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i32.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i64.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i64.nxv8i32.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
@@ -1621,12 +1621,12 @@ vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i32.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i64.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i64.nxv16i32.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
@@ -1635,12 +1635,12 @@ vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i64.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i64.nxv1i64.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
@@ -1649,12 +1649,12 @@ vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i64.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i64.nxv2i64.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
@@ -1663,12 +1663,12 @@ vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i64.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i64.nxv4i64.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
@@ -1677,12 +1677,12 @@ vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i64.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i64.nxv8i64.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
@@ -1691,12 +1691,12 @@ vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i16.nxv1i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i16.i64.nxv1i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i16.i64.nxv1i16.nxv1i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
@@ -1705,12 +1705,12 @@ vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl)
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i16.nxv2i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i16.i64.nxv2i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i16.i64.nxv2i16.nxv2i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
@@ -1719,12 +1719,12 @@ vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl)
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i16.nxv4i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i16.i64.nxv4i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i16.i64.nxv4i16.nxv4i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
@@ -1733,12 +1733,12 @@ vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i16.nxv8i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i16.i64.nxv8i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i16.i64.nxv8i16.nxv8i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
@@ -1747,12 +1747,12 @@ vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i16.nxv16i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i16.i64.nxv16i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i16.i64.nxv16i16.nxv16i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
@@ -1761,12 +1761,12 @@ vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i16.nxv32i8.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv32i16.i64.nxv32i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv32i16.i64.nxv32i16.nxv32i8.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
@@ -1775,12 +1775,12 @@ vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i32.nxv1i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i32.i64.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i32.i64.nxv1i32.nxv1i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
@@ -1789,12 +1789,12 @@ vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t v
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i32.nxv2i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i32.i64.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i32.i64.nxv2i32.nxv2i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
@@ -1803,12 +1803,12 @@ vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl)
// CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i32.nxv4i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i32.i64.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i32.i64.nxv4i32.nxv4i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { @@ -1817,12 +1817,12 @@ vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i32.nxv8i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i32.i64.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i32.i64.nxv8i32.nxv8i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { @@ -1831,12 +1831,12 @@ vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i32.nxv16i16.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i32.i64.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv16i32.i64.nxv16i32.nxv16i16.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { @@ -1845,12 +1845,12 @@ vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i64.nxv1i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i64.i64.nxv1i32.i64.i64(i64 3, 
[[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv1i64.i64.nxv1i64.nxv1i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { @@ -1859,12 +1859,12 @@ vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i64.nxv2i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i64.i64.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv2i64.i64.nxv2i64.nxv2i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { @@ -1873,12 +1873,12 @@ vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i64.nxv4i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i64.i64.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv4i64.i64.nxv4i64.nxv4i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { @@ -1887,12 +1887,12 @@ vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i64.nxv8i32.i32.i32(i32 3, [[VD:%.*]], [[VS2:%.*]], i32 10, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i64.i64.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.ivw.nxv8i64.i64.nxv8i64.nxv8i32.i64.i64(i64 3, [[VD:%.*]], [[VS2:%.*]], i64 10, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_ivw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { @@ -2027,12 +2027,12 @@ void test_sf_vc_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf4( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i32.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i64.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i64.nxv1i32.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -2041,12 +2041,12 @@ vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Floa // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i32.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i64.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i64.nxv2i32.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) { @@ -2055,12 +2055,12 @@ vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i32.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i64.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i64.nxv4i32.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) { @@ -2069,12 +2069,12 @@ vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i32.nxv8i16.f16.i32(i32 1, 
[[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i64.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i64.nxv8i32.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) { @@ -2083,12 +2083,12 @@ vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i32.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i64.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i64.nxv16i32.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) { @@ -2097,12 +2097,12 @@ vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i64.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i64.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i64.nxv1i64.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) { @@ -2111,12 +2111,12 @@ vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float f // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i64.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i64.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i64.nxv2i64.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) { @@ -2125,12 +2125,12 @@ vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i64.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i64.nxv4i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i64.nxv4i64.nxv4i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) { @@ -2139,12 +2139,12 @@ vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i64.nxv8i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i64.nxv8i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i64.nxv8i64.nxv8i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) { @@ -2153,12 +2153,12 @@ vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i32.nxv1i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i32.i64.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i32.i64.nxv1i32.nxv1i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) { @@ -2167,12 +2167,12 @@ vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i32.nxv2i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i32.i64.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i32.i64.nxv2i32.nxv2i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) { @@ -2181,12 +2181,12 @@ vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 f // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i32.nxv4i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv4i32.i64.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv4i32.i64.nxv4i32.nxv4i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) { @@ -2195,12 +2195,12 @@ vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i32.nxv8i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv8i32.i64.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv8i32.i64.nxv8i32.nxv8i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) { @@ -2209,12 +2209,12 @@ vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i32.nxv16i16.f16.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv16i32.i64.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv16i32.i64.nxv16i32.nxv16i16.f16.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) { @@ -2223,12 +2223,12 @@ vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i64.nxv1i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i64.i64.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv1i64.i64.nxv1i64.nxv1i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) { @@ -2237,12 +2237,12 @@ vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i64.nxv2i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i64.i64.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv2i64.i64.nxv2i64.nxv2i32.f32.i64(i64 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) { @@ -2251,12 +2251,12 @@ vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, s // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i64.nxv4i32.f32.i32(i32 1, [[VD:%.*]], [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: 
ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m2(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i64.nxv4i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
@@ -2265,12 +2265,12 @@ vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, s
// CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m4(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m4(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i64.nxv8i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_fvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index f55669bded693..008e86869b8bb 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -42,7 +42,7 @@ let TargetPrefix = "riscv" in {
  // (bit<27-26>, bit<11-7>, vector_in, vector_in/scalar_in, vl)
  class RISCVSFCustomVC_XV : Intrinsic],
+                  !listconcat(!if(HasDst, [llvm_anyint_ty, llvm_anyvector_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty]), [llvm_any_ty, llvm_anyint_ty]),
                   !listconcat([IntrNoMem, ImmArg>],  // bit<27-26>
@@ -61,8 +61,8 @@ let TargetPrefix = "riscv" in {
  // (bit<27-26>, vector_in, vector_in, vector_in/scalar_in, vl)
  class RISCVSFCustomVC_XVV : Intrinsic, LLVMMatchType<0>],
-                  [llvm_anyint_ty, llvm_anyvector_ty, LLVMMatchType<1>]),
+                  !listconcat(!if(HasDst, [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty],
+                                          [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]), [llvm_any_ty, llvm_anyint_ty]),
                   !listconcat([IntrNoMem, ImmArg>],  // bit<27-26>
                   !if(ImmScalar, [ImmArg>], []),  // ScalarOperand
@@ -76,7 +76,7 @@ let TargetPrefix = "riscv" in {
  // (bit<27-26>, wvector_in, vector_in, vector_in/scalar_in, vl)
  class RISCVSFCustomVC_XVW : Intrinsic, llvm_anyvector_ty],
+                  !listconcat(!if(HasDst, [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty], [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]), [llvm_any_ty, llvm_anyint_ty]),
                   !listconcat([IntrNoMem, ImmArg>],  // bit<27-26>
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 26190337eb3bd..f722a0b57a506 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8190,6 +8190,28 @@ static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), Res);
 }
 
+static void getVCIXOperands(SDValue &Op, SelectionDAG &DAG,
+                            SmallVector<SDValue> &Ops) {
+  SDLoc DL(Op);
+
+  const RISCVSubtarget &Subtarget =
+      DAG.getMachineFunction().getSubtarget<RISCVSubtarget>();
+  for (const SDValue &V : Op->op_values()) {
+    EVT ValType = V.getValueType();
+    if (ValType.isScalableVector() && ValType.isFloatingPoint()) {
+      MVT InterimIVT =
+          MVT::getVectorVT(MVT::getIntegerVT(ValType.getScalarSizeInBits()),
+                           ValType.getVectorElementCount());
+      Ops.push_back(DAG.getBitcast(InterimIVT, V));
+    } else if (ValType.isFixedLengthVector()) {
+      MVT OpContainerVT = getContainerForFixedLengthVector(
+          DAG, V.getSimpleValueType(), Subtarget);
+      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+    } else
+      Ops.push_back(V);
+  }
+}
+
 // LMUL * VLEN should be greater than or equal to EGS * SEW
 static inline bool isValidEGW(int EGS, EVT VT,
                               const RISCVSubtarget &Subtarget) {
@@ -8417,26 +8439,27 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::riscv_sf_vc_v_fvw: {
     MVT VT = Op.getSimpleValueType();
-    if (!VT.isFixedLengthVector())
-      break;
+    SmallVector<SDValue> Ops;
+    getVCIXOperands(Op, DAG, Ops);
 
-    SmallVector<SDValue> Ops;
-    for (const SDValue &V : Op->op_values()) {
-      // Skip non-fixed vector operands.
-      if (!V.getValueType().isFixedLengthVector()) {
-        Ops.push_back(V);
-        continue;
-      }
+    MVT RetVT = VT;
+    if (VT.isFixedLengthVector())
+      RetVT = getContainerForFixedLengthVector(VT);
+    else if (VT.isFloatingPoint())
+      RetVT = MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits()),
+                               VT.getVectorElementCount());
 
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(V.getSimpleValueType());
-      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
-    }
+    SDValue NewNode = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetVT, Ops);
+
+    if (VT.isFixedLengthVector())
+      NewNode = convertFromScalableVector(VT, NewNode, DAG, Subtarget);
+    else if (VT.isFloatingPoint())
+      NewNode = DAG.getBitcast(VT, NewNode);
+
+    if (Op == NewNode)
+      break;
 
-    MVT RetContainerVT = getContainerForFixedLengthVector(VT);
-    SDValue Scalable =
-        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, RetContainerVT, Ops);
-    return convertFromScalableVector(VT, Scalable, DAG, Subtarget);
+    return NewNode;
   }
   }
 
@@ -8573,30 +8596,33 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::riscv_sf_vc_v_vvw_se:
   case Intrinsic::riscv_sf_vc_v_fvw_se: {
     MVT VT = Op.getSimpleValueType();
+    SDLoc DL(Op);
+    SmallVector<SDValue> Ops;
+    getVCIXOperands(Op, DAG, Ops);
 
-    if (!VT.isFixedLengthVector())
-      break;
+    MVT RetVT = VT;
+    if (VT.isFixedLengthVector())
+      RetVT = getContainerForFixedLengthVector(VT);
+    else if (VT.isFloatingPoint())
+      RetVT = MVT::getVectorVT(MVT::getIntegerVT(RetVT.getScalarSizeInBits()),
+                               RetVT.getVectorElementCount());
 
-    SmallVector<SDValue> Ops;
-    for (const SDValue &V : Op->op_values()) {
-      // Skip non-fixed vector operands.
-      if (!V.getValueType().isFixedLengthVector()) {
-        Ops.push_back(V);
-        continue;
-      }
+    SDVTList VTs = DAG.getVTList({RetVT, MVT::Other});
+    SDValue NewNode = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
 
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(V.getSimpleValueType());
-      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
+    if (VT.isFixedLengthVector()) {
+      SDValue FixedVector =
+          convertFromScalableVector(VT, NewNode, DAG, Subtarget);
+      NewNode = DAG.getMergeValues({FixedVector, NewNode.getValue(1)}, DL);
+    } else if (VT.isFloatingPoint()) {
+      SDValue BitCast = DAG.getBitcast(VT, NewNode.getValue(0));
+      NewNode = DAG.getMergeValues({BitCast, NewNode.getValue(1)}, DL);
     }
 
-    SDLoc DL(Op);
-    MVT RetContainerVT = getContainerForFixedLengthVector(VT);
-    SDVTList VTs = DAG.getVTList({RetContainerVT, MVT::Other});
-    SDValue ScalableVector = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
-    SDValue FixedVector =
-        convertFromScalableVector(VT, ScalableVector, DAG, Subtarget);
-    return DAG.getMergeValues({FixedVector, ScalableVector.getValue(1)}, DL);
+    if (Op == NewNode)
+      break;
+
+    return NewNode;
   }
   }
 
@@ -8741,25 +8767,16 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   case Intrinsic::riscv_sf_vc_ivw_se:
   case Intrinsic::riscv_sf_vc_vvw_se:
   case Intrinsic::riscv_sf_vc_fvw_se: {
-    if (!llvm::any_of(Op->op_values(), [&](const SDValue &V) {
-          return V.getValueType().isFixedLengthVector();
-        }))
-      break;
+    SmallVector<SDValue> Ops;
+    getVCIXOperands(Op, DAG, Ops);
 
-    SmallVector<SDValue> Ops;
-    for (const SDValue &V : Op->op_values()) {
-      // Skip non-fixed vector operands.
-      if (!V.getValueType().isFixedLengthVector()) {
-        Ops.push_back(V);
-        continue;
-      }
+    SDValue NewNode =
+        DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
 
-      MVT OpContainerVT =
-          getContainerForFixedLengthVector(V.getSimpleValueType());
-      Ops.push_back(convertToScalableVector(OpContainerVT, V, DAG, Subtarget));
-    }
+    if (Op == NewNode)
+      break;
 
-    return DAG.getNode(ISD::INTRINSIC_VOID, SDLoc(Op), Op->getVTList(), Ops);
+    return NewNode;
  }
  }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index 56e59a1641989..235cfd3f604b7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -600,7 +600,7 @@ multiclass VPatVFNRCLIP {
 }
 
 let Predicates = [HasVendorXSfvcp] in {
-  foreach vti = AllVectors in {
+  foreach vti = AllIntegerVectors in {
     defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
     defm : VPatVC_X<"i", "I", vti, XLenVT, tsimm5>;
     defm : VPatVC_XV<"xv", "XV", vti, XLenVT, GPR>;
@@ -618,7 +618,7 @@ let Predicates = [HasVendorXSfvcp] in {
                        finfo.ScalarRegClass, payload1>;
     }
   }
-  foreach VtiToWti = !listconcat(AllWidenableIntVectors, AllWidenableFloatVectors) in {
+  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    defvar iinfo = GetIntVTypeInfo<vti>.Vti;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
index 68b92f975a52d..4c5b5cfbd9d96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-x.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvcp \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvcp \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_x_se_e8mf8(i8 zeroext %rs1, iXLen %vl) {
@@ -1563,3 +1563,290 @@ entry:
 }
 
 declare <8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x half> @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <1 x half> %0
+}
+
+declare <1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <2 x half> @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <2 x half> %0
+}
+
+declare <2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <4 x half> @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <4 x half> %0
+}
+
+declare <4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <8 x half> @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <8 x half> %0
+}
+
+declare <8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <16 x half> @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <16 x half> %0
+}
+
+declare <16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <32 x half> @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <32 x half> %0
+}
+
+declare <32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <1 x float> @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <1 x float> %0
+}
+
+declare <1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <2 x float> @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <4 x float> @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <8 x float> @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <8 x float> %0
+}
+
+declare <8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <16 x float> @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <16 x float> %0
+}
+
+declare <16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <1 x half> @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <1 x half> %0
+}
+
+declare <1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x half> @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <2 x half> %0
+}
+
+declare <2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x half> @test_sf_vc_fv_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <4 x half> %0
+}
+
+declare <4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x half> @test_sf_vc_fv_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <8 x half> %0
+}
+
+declare <8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x half> @test_sf_vc_fv_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <16 x half> %0
+}
+
+declare <16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <32 x half> @test_sf_vc_fv_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <32 x half> %0
+}
+
+declare <32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <1 x float> @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <1 x float> %0
+}
+
+declare <1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <2 x float> @test_sf_vc_fv_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <4 x float> @test_sf_vc_fv_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <8 x float> @test_sf_vc_fv_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <8 x float> %0
+}
+
+declare <8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <16 x float> @test_sf_vc_fv_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <16 x float> %0
+}
+
+declare <16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll
index 12a149de6a4df..e845dffecff8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xv.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 define void @test_sf_vc_vv_se_e8mf8(<1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
@@ -2422,587 +2422,1251 @@ entry:
 
 declare <8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, iXLen, iXLen)
 
-define void @test_sf_vc_fv_se_e16mf4(<1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16mf4:
+define void @test_sf_vc_fvv_se_e16mf4(<1 x half> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen 3, iXLen 31, <1 x half> %vs2, <1 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen, iXLen, <1 x half>, <1 x i16>, iXLen)
+
+define <1 x half> @test_sf_vc_v_fvv_se_e16mf4(<1 x half> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <1 x half> %vs2, <1 x i16> %vs1, iXLen %vl)
+  ret <1 x half> %0
+}
+
+declare <1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen)
+
+define void @test_sf_vc_fvv_se_e16mf2(<2 x half> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen 3, iXLen 31, <2 x half> %vs2, <2 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen, iXLen, <2 x half>, <2 x i16>, iXLen)
+
+define <2 x half> @test_sf_vc_v_fvv_se_e16mf2(<2 x half> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <2 x half> %vs2, <2 x i16> %vs1, iXLen %vl)
+  ret <2 x half> %0
+}
+
+declare <2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m1(<4 x half> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen 3, iXLen 31, <4 x half> %vs2, <4 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen, iXLen, <4 x half>, <4 x i16>, iXLen)
+
+define <4 x half> @test_sf_vc_v_fvv_se_e16m1(<4 x half> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <4 x half> %vs2, <4 x i16> %vs1, iXLen %vl)
+  ret <4 x half> %0
+}
+
+declare <4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m2(<8 x half> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen 3, iXLen 31, <8 x half> %vs2, <8 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen, iXLen, <8 x half>, <8 x i16>, iXLen)
+
+define <8 x half> @test_sf_vc_v_fvv_se_e16m2(<8 x half> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <8 x half> %vs2, <8 x i16> %vs1, iXLen %vl)
+  ret <8 x half> %0
+}
+
+declare <8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m4(<16 x half> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen 3, iXLen 31, <16 x half> %vs2, <16 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen, iXLen, <16 x half>, <16 x i16>, iXLen)
+
+define <16 x half> @test_sf_vc_v_fvv_se_e16m4(<16 x half> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <16 x half> %vs2, <16 x i16> %vs1, iXLen %vl)
+  ret <16 x half> %0
+}
+
+declare <16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen)
+
+define void @test_sf_vc_fvv_se_e16m8(<32 x half> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen 3, iXLen 31, <32 x half> %vs2, <32 x i16> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen, iXLen, <32 x half>, <32 x i16>, iXLen)
+
+define <32 x half> @test_sf_vc_v_fvv_se_e16m8(<32 x half> %vs2, <32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <32 x half> %vs2, <32 x i16> %vs1, iXLen %vl)
+  ret <32 x half> %0
+}
+
+declare <32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen)
+
+define void @test_sf_vc_fvv_se_e32mf2(<1 x float> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen 3, iXLen 31, <1 x float> %vs2, <1 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen, iXLen, <1 x float>, <1 x i32>, iXLen)
+
+define <1 x float> @test_sf_vc_v_fvv_se_e32mf2(<1 x float> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <1 x float> %vs2, <1 x i32> %vs1, iXLen %vl)
+  ret <1 x float> %0
+}
+
+declare <1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m1(<2 x float> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen 3, iXLen 31, <2 x float> %vs2, <2 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen, iXLen, <2 x float>, <2 x i32>, iXLen)
+
+define <2 x float> @test_sf_vc_v_fvv_se_e32m1(<2 x float> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <2 x float> %vs2, <2 x i32> %vs1, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m2(<4 x float> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen 3, iXLen 31, <4 x float> %vs2, <4 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen, iXLen, <4 x float>, <4 x i32>, iXLen)
+
+define <4 x float> @test_sf_vc_v_fvv_se_e32m2(<4 x float> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <4 x float> %vs2, <4 x i32> %vs1, iXLen %vl)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m4(<8 x float> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen 3, iXLen 31, <8 x float> %vs2, <8 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen, iXLen, <8 x float>, <8 x i32>, iXLen)
+
+define <8 x float> @test_sf_vc_v_fvv_se_e32m4(<8 x float> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <8 x float> %vs2, <8 x i32> %vs1, iXLen %vl)
+  ret <8 x float> %0
+}
+
+declare <8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen)
+
+define void @test_sf_vc_fvv_se_e32m8(<16 x float> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen 3, iXLen 31, <16 x float> %vs2, <16 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen, iXLen, <16 x float>, <16 x i32>, iXLen)
+
+define <16 x float> @test_sf_vc_v_fvv_se_e32m8(<16 x float> %vs2, <16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <16 x float> %vs2, <16 x i32> %vs1, iXLen %vl)
+  ret <16 x float> %0
+}
+
+declare <16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m1(<1 x double> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen 3, iXLen 31, <1 x double> %vs2, <1 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen, iXLen, <1 x double>, <1 x i64>, iXLen)
+
+define <1 x double> @test_sf_vc_v_fvv_se_e64m1(<1 x double> %vs2, <1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <1 x double> %vs2, <1 x i64> %vs1, iXLen %vl)
+  ret <1 x double> %0
+}
+
+declare <1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <1 x double>, <1 x i64>, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m2(<2 x double> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen 3, iXLen 31, <2 x double> %vs2, <2 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen, iXLen, <2 x double>, <2 x i64>, iXLen)
+
+define <2 x double> @test_sf_vc_v_fvv_se_e64m2(<2 x double> %vs2, <2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <2 x double> %vs2, <2 x i64> %vs1, iXLen %vl)
+  ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <2 x double>, <2 x i64>, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m4(<4 x double> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen 3, iXLen 31, <4 x double> %vs2, <4 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen, iXLen, <4 x double>, <4 x i64>, iXLen)
+
+define <4 x double> @test_sf_vc_v_fvv_se_e64m4(<4 x double> %vs2, <4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <4 x double> %vs2, <4 x i64> %vs1, iXLen %vl)
+  ret <4 x double> %0
+}
+
+declare <4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <4 x double>, <4 x i64>, iXLen)
+
+define void @test_sf_vc_fvv_se_e64m8(<8 x double> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vv 3, 31, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen 3, iXLen 31, <8 x double> %vs2, <8 x i64> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen, iXLen, <8 x double>, <8 x i64>, iXLen)
+
+define <8 x double> @test_sf_vc_v_fvv_se_e64m8(<8 x double> %vs2, <8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <8 x double> %vs2, <8 x i64> %vs1, iXLen %vl)
+  ret <8 x double> %0
+}
+
+declare <8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <8 x double>, <8 x i64>, iXLen)
+
+define void @test_sf_vc_fvx_se_e16mf4(<1 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen 3, iXLen 31, <1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen, iXLen, <1 x half>, i16, iXLen)
+
+define <1 x half> @test_sf_vc_v_fvx_se_e16mf4(<1 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen 3, <1 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <1 x half> %0
+}
+
+declare <1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen, <1 x half>, i16, iXLen)
+
+define void @test_sf_vc_fvx_se_e16mf2(<2 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen 3, iXLen 31, <2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen, iXLen, <2 x half>, i16, iXLen)
+
+define <2 x half> @test_sf_vc_v_fvx_se_e16mf2(<2 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen 3, <2 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <2 x half> %0
+}
+
+declare <2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen, <2 x half>, i16, iXLen)
+
+define void @test_sf_vc_fvx_se_e16m1(<4 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen 3, iXLen 31, <4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen, iXLen, <4 x half>, i16, iXLen)
+
+define <4 x half> @test_sf_vc_v_fvx_se_e16m1(<4 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen 3, <4 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <4 x half> %0
+}
+
+declare <4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen, <4 x half>, i16, iXLen)
+
+define void @test_sf_vc_fvx_se_e16m2(<8 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen 3, iXLen 31, <8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen, iXLen, <8 x half>, i16, iXLen)
+
+define <8 x half> @test_sf_vc_v_fvx_se_e16m2(<8 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen 3, <8 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <8 x half> %0
+}
+
+declare <8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen, <8 x half>, i16, iXLen)
+
+define void @test_sf_vc_fvx_se_e16m4(<16 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen 3, iXLen 31, <16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen, iXLen, <16 x half>, i16, iXLen)
+
+define <16 x half> @test_sf_vc_v_fvx_se_e16m4(<16 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, iXLen 31, <1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen 3, <16 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <16 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen, iXLen, <1 x i16>, half, iXLen)
+declare <16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen, <16 x half>, i16, iXLen)
 
-define void @test_sf_vc_fv_se_e16mf2(<2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16mf2:
+define void @test_sf_vc_fvx_se_e16m8(<32 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, iXLen 31, <2 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen 3, iXLen 31, <32 x half> %vs2, i16 %rs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen, iXLen, <2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen, iXLen, <32 x half>, i16, iXLen)
 
-define void @test_sf_vc_fv_se_e16m1(<4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m1:
+define <32 x half> @test_sf_vc_v_fvx_se_e16m8(<32 x half> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, iXLen 31, <4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen 3, <32 x half> %vs2, i16 %rs1, iXLen %vl)
+  ret <32 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen, iXLen, <4 x i16>, half, iXLen)
+declare <32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen, <32 x half>, i16, iXLen)
 
-define void @test_sf_vc_fv_se_e16m2(<8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m2:
+define void @test_sf_vc_fvx_se_e32mf2(<1 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, iXLen 31, <8 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen 3, iXLen 31, <1 x float> %vs2, i32 %rs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen, iXLen, <8 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen, iXLen, <1 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e16m4(<16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m4:
+define <1 x float> @test_sf_vc_v_fvx_se_e32mf2(<1 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, iXLen 31, <16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen 3, <1 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <1 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen, iXLen, <16 x i16>, half, iXLen)
+declare <1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen, <1 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e16m8(<32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m8:
+define void @test_sf_vc_fvx_se_e32m1(<2 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, iXLen 31, <32 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen 3, iXLen 31, <2 x float> %vs2, i32 %rs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen, iXLen, <32 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen, iXLen, <2 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e32mf2(<1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e32mf2:
+define <2 x float> @test_sf_vc_v_fvx_se_e32m1(<2 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, iXLen 31, <1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen 3, <2 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <2 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen, iXLen, <1 x i32>, float, iXLen)
+declare <2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen, <2 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e32m1(<2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e32m1:
+define void @test_sf_vc_fvx_se_e32m2(<4 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, iXLen 31, <2 x i32> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen 3, iXLen 31, <4 x float> %vs2, i32 %rs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen, iXLen, <2 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen, iXLen, <4 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e32m2(<4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e32m2:
+define <4 x float> @test_sf_vc_v_fvx_se_e32m2(<4 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, iXLen 31, <4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen 3, <4 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <4 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen, iXLen, <4 x i32>, float, iXLen)
+declare <4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen, <4 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e32m4(<8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e32m4:
+define void @test_sf_vc_fvx_se_e32m4(<8 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, iXLen 31, <8 x i32> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen 3, iXLen 31, <8 x float> %vs2, i32 %rs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen, iXLen, <8 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen, iXLen, <8 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e32m8(<16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e32m8:
+define <8 x float> @test_sf_vc_v_fvx_se_e32m4(<8 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, iXLen 31, <16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen 3, <8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <8 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen, iXLen, <16 x i32>, float, iXLen)
+declare <8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen, <8 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e64m1(<1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e64m1:
+define void @test_sf_vc_fvx_se_e32m8(<16 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, iXLen 31, <1 x i64> %vs2, double %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen 3, iXLen 31, <16 x float> %vs2, i32 %rs1, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen, iXLen, <1 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen, iXLen, <16 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e64m2(<2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e64m2:
+define <16 x float> @test_sf_vc_v_fvx_se_e32m8(<16 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, iXLen 31, <2 x i64> %vs2, double %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen 3, <16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <16 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen, iXLen, <2 x i64>, double, iXLen)
+declare <16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen, <16 x float>, i32, iXLen)
 
-define void @test_sf_vc_fv_se_e64m4(<4 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e64m4:
+define void @test_sf_vc_fvi_se_e16mf4(<1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, iXLen 31, <4 x i64> %vs2, double %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 31, <1 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen, iXLen, <4 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, iXLen, <1 x half>, iXLen, iXLen)
 
-define void @test_sf_vc_fv_se_e64m8(<8 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e64m8:
+define <1 x half> @test_sf_vc_v_fvi_se_e16mf4(<1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, iXLen 31, <8 x i64> %vs2, double %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen 3, <1 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <1 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen, iXLen, <8 x i64>, double, iXLen)
+declare <1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, <1 x half>, iXLen, iXLen)
 
-define <1 x i16> @test_sf_vc_v_fv_se_e16mf4(<1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4:
+define void @test_sf_vc_fvi_se_e16mf2(<2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <1 x i16> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 31, <2 x half> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }
 
-declare <1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, iXLen, <2 x half>, iXLen, iXLen)
 
-define <2 x i16> @test_sf_vc_v_fv_se_e16mf2(<2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2:
+define <2 x half> @test_sf_vc_v_fvi_se_e16mf2(<2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <2 x i16> %0
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen 3, <2 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <2 x half> %0
 }
 
-declare <2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, half, iXLen)
+declare <2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, <2 x half>, iXLen, iXLen)
 
-define <4 x i16> @test_sf_vc_v_fv_se_e16m1(<4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1:
+define void @test_sf_vc_fvi_se_e16m1(<4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <4 x i16> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 31, <4 x half> %vs2, iXLen 8, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, iXLen, <4 x half>, iXLen, iXLen)
+
+define <4 x half> @test_sf_vc_v_fvi_se_e16m1(<4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen 3, <4 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <4 x half> %0
 }
 
-declare <4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, half, iXLen)
+declare <4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, <4 x half>, iXLen, iXLen)
 
-define <8 x i16> @test_sf_vc_v_fv_se_e16m2(<8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2:
+define void @test_sf_vc_fvi_se_e16m2(<8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <8 x i16> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 31, <8 x half> %vs2, iXLen 8, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, iXLen, <8 x half>, iXLen, iXLen)
+
+define <8 x half> @test_sf_vc_v_fvi_se_e16m2(<8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen 3, <8 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <8 x half> %0
 }
 
-declare <8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, half, iXLen)
+declare <8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, <8 x half>, iXLen, iXLen)
 
-define <16 x i16> @test_sf_vc_v_fv_se_e16m4(<16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4:
+define void @test_sf_vc_fvi_se_e16m4(<16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <16 x i16> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 31, <16 x half> %vs2, iXLen 8, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, iXLen, <16 x half>, iXLen, iXLen)
+
+define <16 x half> @test_sf_vc_v_fvi_se_e16m4(<16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen 3, <16 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <16 x half> %0
 }
 
-declare <16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, half, iXLen)
+declare <16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, <16 x half>, iXLen, iXLen)
 
-define <32 x i16> @test_sf_vc_v_fv_se_e16m8(<32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8:
+define void @test_sf_vc_fvi_se_e16m8(<32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <32 x i16> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 31, <32 x half> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }
 
-declare <32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, iXLen, <32 x half>, iXLen, iXLen)
 
-define <1 x i32> @test_sf_vc_v_fv_se_e32mf2(<1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2:
+define <32 x half> @test_sf_vc_v_fvi_se_e16m8(<32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen 3, <32 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <32 x half> %0
+}
+
+declare <32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, <32 x half>, iXLen, iXLen)
+
+define void @test_sf_vc_fvi_se_e32mf2(<1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <1 x i32> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 31, <1 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }
 
-declare <1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, iXLen, <1 x float>, iXLen, iXLen)
 
-define <2 x i32> @test_sf_vc_v_fv_se_e32m1(<2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1:
+define <1 x float> @test_sf_vc_v_fvi_se_e32mf2(<1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <2 x i32> %0
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen 3, <1 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <1 x float> %0
+}
+
+declare <1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, <1 x float>, iXLen, iXLen)
+
+define void @test_sf_vc_fvi_se_e32m1(<2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 31, <2 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, iXLen, <2 x float>, iXLen, iXLen)
+
+define <2 x float> @test_sf_vc_v_fvi_se_e32m1(<2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen 3, <2 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <2 x float> %0
 }
 
-declare <2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, float, iXLen)
+declare <2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, <2 x float>, iXLen, iXLen)
 
-define <4 x i32> @test_sf_vc_v_fv_se_e32m2(<4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2:
+define void @test_sf_vc_fvi_se_e32m2(<4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <4 x i32> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 31, <4 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, iXLen, <4 x float>, iXLen, iXLen)
+
+define <4 x float> @test_sf_vc_v_fvi_se_e32m2(<4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen 3, <4 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <4 x float> %0
 }
 
-declare <4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, float, iXLen)
+declare <4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, <4 x float>, iXLen, iXLen)
 
-define <8 x i32> @test_sf_vc_v_fv_se_e32m4(<8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4:
+define void @test_sf_vc_fvi_se_e32m4(<8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <8 x i32> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 31, <8 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }
 
-declare <8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, iXLen, <8 x float>, iXLen, iXLen)
 
-define <16 x i32> @test_sf_vc_v_fv_se_e32m8(<16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8:
+define <8 x float> @test_sf_vc_v_fvi_se_e32m4(<8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <16 x i32> %0
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen 3, <8 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <8 x float> %0
 }
 
-declare <16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, float, iXLen)
+declare <8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, <8 x float>, iXLen, iXLen)
 
-define <1 x i64> @test_sf_vc_v_fv_se_e64m1(<1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1:
+define void @test_sf_vc_fvi_se_e32m8(<16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <1 x i64> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 31, <16 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }
 
-declare <1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, iXLen, <16 x float>, iXLen, iXLen)
 
-define <2 x i64> @test_sf_vc_v_fv_se_e64m2(<2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2:
+define <16 x float> @test_sf_vc_v_fvi_se_e32m8(<16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <2 x i64> %0
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen 3, <16 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <16 x float> %0
 }
 
-declare <2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, double, iXLen)
+declare <16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, <16 x float>, iXLen, iXLen)
 
-define <4 x i64> @test_sf_vc_v_fv_se_e64m4(<4 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4:
+define void @test_sf_vc_fvf_se_e16mf4(<1 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <4 x i64> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen 1, iXLen 31, <1 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }
 
-declare <4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen, iXLen, <1 x half>, half, iXLen)
 
-define <8 x i64> @test_sf_vc_v_fv_se_e64m8(<8 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8:
+define <1 x half> @test_sf_vc_v_fvf_se_e16mf4(<1 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <8 x i64> %0
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen 1, <1 x half> %vs2, half %rs1, iXLen %vl)
+  ret <1 x half> %0
 }
 
-declare <8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, double, iXLen)
+declare <1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen, <1 x half>, half, iXLen)
 
-define <1 x i16> @test_sf_vc_v_fv_e16mf4(<1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e16mf4:
+define void @test_sf_vc_fvf_se_e16mf2(<2 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <1 x i16> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen 1, iXLen 31, <2 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }
 
-declare <1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen, iXLen, <2 x half>, half, iXLen)
 
-define <2 x i16> @test_sf_vc_v_fv_e16mf2(<2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e16mf2:
+define <2 x half> @test_sf_vc_v_fvf_se_e16mf2(<2 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <2 x i16> %0
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen 1, <2 x half> %vs2, half %rs1, iXLen %vl)
+  ret <2 x half> %0
+}
+
+declare <2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen, <2 x half>, half, iXLen)
+
+define void @test_sf_vc_fvf_se_e16m1(<4 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen 1, iXLen 31, <4 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }
 
-declare <2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen, iXLen, <4 x half>, half, iXLen)
 
-define <4 x i16> @test_sf_vc_v_fv_e16m1(<4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e16m1:
+define <4 x half> @test_sf_vc_v_fvf_se_e16m1(<4 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <4 x i16> %0
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen 1, <4 x half> %vs2, half %rs1, iXLen %vl)
+  ret <4 x half> %0
+}
+
+declare <4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen, <4 x half>, half, iXLen)
+
+define void @test_sf_vc_fvf_se_e16m2(<8 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen 1, iXLen 31, <8 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }
 
-declare <4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen, iXLen, <8 x half>, half, iXLen)
 
-define <8 x i16> @test_sf_vc_v_fv_e16m2(<8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e16m2:
+define <8 x half> @test_sf_vc_v_fvf_se_e16m2(<8 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <8 x i16> %0
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen 1, <8 x half> %vs2, half %rs1, iXLen %vl)
+  ret <8 x half> %0
+}
+
+declare <8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen, <8 x half>, half, iXLen)
+
+define void @test_sf_vc_fvf_se_e16m4(<16 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen 1, iXLen 31, <16 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }
 
-declare <8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen, iXLen, <16 x half>, half, iXLen)
 
-define <16 x i16> @test_sf_vc_v_fv_e16m4(<16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e16m4:
+define <16 x half> @test_sf_vc_v_fvf_se_e16m4(<16 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <16 x i16> %0
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen 1, <16 x half> %vs2, half %rs1, iXLen %vl)
+  ret <16 x half> %0
+}
+
+declare <16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen, <16 x half>, half, iXLen)
+
+define void @test_sf_vc_fvf_se_e16m8(<32 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen 1, iXLen 31, <32 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }
 
-declare <16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen, iXLen, <32 x half>, half, iXLen)
 
-define <32 x i16> @test_sf_vc_v_fv_e16m8(<32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e16m8:
+define <32 x half> @test_sf_vc_v_fvf_se_e16m8(<32 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <32 x i16> %0
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen 1, <32 x half> %vs2, half %rs1, iXLen %vl)
+  ret <32 x half> %0
 }
 
-declare <32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, half, iXLen)
+declare <32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen, <32 x half>, half, iXLen)
 
-define <1 x i32> @test_sf_vc_v_fv_e32mf2(<1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e32mf2:
+define void @test_sf_vc_fvf_se_e32mf2(<1 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <1 x i32> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen 1, iXLen 31, <1 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }
 
-declare <1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen, iXLen, <1 x float>, float, iXLen)
 
-define <2 x i32> @test_sf_vc_v_fv_e32m1(<2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e32m1:
+define <1 x float> @test_sf_vc_v_fvf_se_e32mf2(<1 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <2 x i32> %0
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen 1, <1 x float> %vs2, float %rs1, iXLen %vl)
+  ret <1 x float> %0
 }
 
-declare <2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, float, iXLen)
+declare <1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen, <1 x float>, float, iXLen)
 
-define <4 x i32> @test_sf_vc_v_fv_e32m2(<4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e32m2:
+define void @test_sf_vc_fvf_se_e32m1(<2 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <4 x i32> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen 1, iXLen 31, <2 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }
 
-declare <4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen, iXLen, <2 x float>, float, iXLen)
 
-define <8 x i32> @test_sf_vc_v_fv_e32m4(<8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e32m4:
+define <2 x float> @test_sf_vc_v_fvf_se_e32m1(<2 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <8 x i32> %0
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen 1, <2 x float> %vs2, float %rs1, iXLen %vl)
+  ret <2 x float> %0
 }
 
-declare <8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, float, iXLen)
+declare <2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen, <2 x float>, float, iXLen)
 
-define <16 x i32> @test_sf_vc_v_fv_e32m8(<16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e32m8:
+define void @test_sf_vc_fvf_se_e32m2(<4 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <16 x i32> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen 1, iXLen 31, <4 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }
 
-declare <16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen, iXLen, <4 x float>, float, iXLen)
 
-define <1 x i64> @test_sf_vc_v_fv_e64m1(<1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fv_e64m1:
+define <4 x float> @test_sf_vc_v_fvf_se_e32m2(<4 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <1 x i64> %0
<1 x i64> %0 + %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen 1, <4 x float> %vs2, float %rs1, iXLen %vl) + ret <4 x float> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, double, iXLen) +declare <4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen, <4 x float>, float, iXLen) -define <2 x i64> @test_sf_vc_v_fv_e64m2(<2 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m2: +define void @test_sf_vc_fvf_se_e32m4(<8 x float> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vs2, double %fs1, iXLen %vl) - ret <2 x i64> %0 + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen 1, iXLen 31, <8 x float> %vs2, float %rs1, iXLen %vl) + ret void } -declare <2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, double, iXLen) +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen, iXLen, <8 x float>, float, iXLen) -define <4 x i64> @test_sf_vc_v_fv_e64m4(<4 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m4: +define <8 x float> @test_sf_vc_v_fvf_se_e32m4(<8 x float> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vs2, double %fs1, iXLen %vl) - ret <4 x i64> %0 + %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen 1, <8 x float> %vs2, float %rs1, iXLen %vl) + ret <8 x float> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, double, iXLen) +declare <8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen, <8 x float>, float, iXLen) -define <8 x i64> @test_sf_vc_v_fv_e64m8(<8 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m8: +define void @test_sf_vc_fvf_se_e32m8(<16 x float> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvf_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen 1, iXLen 31, <16 x float> %vs2, float %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen, iXLen, <16 x float>, float, iXLen) + +define <16 x float> @test_sf_vc_v_fvf_se_e32m8(<16 x float> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vs2, double %fs1, iXLen %vl) - ret <8 x i64> %0 + %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen 1, <16 x float> %vs2, float %rs1, iXLen %vl) + 
ret <16 x float> %0 } -declare <8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, double, iXLen) +declare <16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen, <16 x float>, float, iXLen) + diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll index 7be1f1ab65f80..2a9153fc0f82d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvv.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \ +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \ ; RUN: -verify-machineinstrs | FileCheck %s -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \ +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \ ; RUN: -verify-machineinstrs | FileCheck %s define void @test_sf_vc_vvv_se_e8mf8(<1 x i8> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) { @@ -2422,587 +2422,1251 @@ entry: declare <8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <8 x i64>, <8 x i64>, iXLen, iXLen) -define void @test_sf_vc_fvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4: +define void @test_sf_vc_fvvv_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <1 x half>, <1 x i16>, <1 x i16>, iXLen) + +define <1 x half> @test_sf_vc_fv_fvv_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x half> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) + ret <1 x half> %0 +} + +declare <1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <1 x half>, <1 x i16>, <1 x i16>, iXLen) + +define void @test_sf_vc_fvvv_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <2 x half>, <2 x i16>, <2 x i16>, iXLen) + +define <2 x half> @test_sf_vc_fv_fvv_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <2 x half> 
@llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x half> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) + ret <2 x half> %0 +} + +declare <2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <2 x half>, <2 x i16>, <2 x i16>, iXLen) + +define void @test_sf_vc_fvvv_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <4 x half>, <4 x i16>, <4 x i16>, iXLen) + +define <4 x half> @test_sf_vc_fv_fvv_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x half> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) + ret <4 x half> %0 +} + +declare <4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <4 x half>, <4 x i16>, <4 x i16>, iXLen) + +define void @test_sf_vc_fvvv_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <8 x half>, <8 x i16>, <8 x i16>, iXLen) + +define <8 x half> @test_sf_vc_fv_fvv_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x half> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) + ret <8 x half> %0 +} + +declare <8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <8 x half>, <8 x i16>, <8 x i16>, iXLen) + +define void @test_sf_vc_fvvv_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <16 x half>, <16 x i16>, <16 x i16>, iXLen) + +define <16 x half> @test_sf_vc_fv_fvv_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call <16 x half> 
@llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x half> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) + ret <16 x half> %0 +} + +declare <16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <16 x half>, <16 x i16>, <16 x i16>, iXLen) + +define void @test_sf_vc_fvvv_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <32 x half>, <32 x i16>, <32 x i16>, iXLen) + +define <32 x half> @test_sf_vc_fv_fvv_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <32 x half> %vd, <32 x i16> %vs2, <32 x i16> %vs1, iXLen %vl) + ret <32 x half> %0 +} + +declare <32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <32 x half>, <32 x i16>, <32 x i16>, iXLen) + +define void @test_sf_vc_fvvv_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x float> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <1 x float>, <1 x i32>, <1 x i32>, iXLen) + +define <1 x float> @test_sf_vc_fv_fvv_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x float> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) + ret <1 x float> %0 +} + +declare <1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <1 x float>, <1 x i32>, <1 x i32>, iXLen) + +define void @test_sf_vc_fvvv_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <2 x float>, <2 x i32>, <2 x i32>, iXLen) + +define <2 x float> @test_sf_vc_fv_fvv_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; 
CHECK-NEXT: ret +entry: + %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x float> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) + ret <2 x float> %0 +} + +declare <2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <2 x float>, <2 x i32>, <2 x i32>, iXLen) + +define void @test_sf_vc_fvvv_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <4 x float>, <4 x i32>, <4 x i32>, iXLen) + +define <4 x float> @test_sf_vc_fv_fvv_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x float> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) + ret <4 x float> %0 +} + +declare <4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <4 x float>, <4 x i32>, <4 x i32>, iXLen) + +define void @test_sf_vc_fvvv_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <8 x float>, <8 x i32>, <8 x i32>, iXLen) + +define <8 x float> @test_sf_vc_fv_fvv_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x float> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) + ret <8 x float> %0 +} + +declare <8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <8 x float>, <8 x i32>, <8 x i32>, iXLen) + +define void @test_sf_vc_fvvv_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <16 x float>, <16 x i32>, <16 x i32>, iXLen) + +define <16 x float> @test_sf_vc_fv_fvv_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: 
sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <16 x float> %vd, <16 x i32> %vs2, <16 x i32> %vs1, iXLen %vl) + ret <16 x float> %0 +} + +declare <16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <16 x float>, <16 x i32>, <16 x i32>, iXLen) + +define void @test_sf_vc_fvvv_se_e64m1(<1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <1 x double>, <1 x i64>, <1 x i64>, iXLen) + +define <1 x double> @test_sf_vc_fv_fvv_se_e64m1(<1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <1 x double> %vd, <1 x i64> %vs2, <1 x i64> %vs1, iXLen %vl) + ret <1 x double> %0 +} + +declare <1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <1 x double>, <1 x i64>, <1 x i64>, iXLen) + +define void @test_sf_vc_fvvv_se_e64m2(<2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <2 x double>, <2 x i64>, <2 x i64>, iXLen) + +define <2 x double> @test_sf_vc_fv_fvv_se_e64m2(<2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <2 x double> %vd, <2 x i64> %vs2, <2 x i64> %vs1, iXLen %vl) + ret <2 x double> %0 +} + +declare <2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <2 x double>, <2 x i64>, <2 x i64>, iXLen) + +define void @test_sf_vc_fvvv_se_e64m4(<4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <4 x double>, <4 x i64>, <4 x i64>, iXLen) + +define <4 x double> @test_sf_vc_fv_fvv_se_e64m4(<4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12 +; CHECK-NEXT: ret +entry: + %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <4 x double> %vd, <4 x i64> %vs2, <4 x i64> %vs1, iXLen %vl) + ret <4 x double> %0 +} + +declare <4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <4 x double>, <4 x i64>, <4 x i64>, iXLen) + +define void @test_sf_vc_fvvv_se_e64m8(<8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <8 x double>, <8 x i64>, <8 x i64>, iXLen) + +define <8 x double> @test_sf_vc_fv_fvv_se_e64m8(<8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16 +; CHECK-NEXT: ret +entry: + %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <8 x double> %vd, <8 x i64> %vs2, <8 x i64> %vs1, iXLen %vl) + ret <8 x double> %0 +} + +declare <8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <8 x double>, <8 x i64>, <8 x i64>, iXLen) + +define void @test_sf_vc_fvvx_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x half>, <1 x i16>, i16, iXLen) + +define <1 x half> @test_sf_vc_v_fvvx_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <1 x half> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) + ret <1 x half> %0 +} + +declare <1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x half>, <1 x i16>, i16, iXLen) + +define void @test_sf_vc_fvvx_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x half>, <2 x i16>, i16, iXLen) + +define <2 x half> @test_sf_vc_v_fvvx_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: 
sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <2 x half> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) + ret <2 x half> %0 +} + +declare <2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x half>, <2 x i16>, i16, iXLen) + +define void @test_sf_vc_fvvx_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x half>, <4 x i16>, i16, iXLen) + +define <4 x half> @test_sf_vc_v_fvvx_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <4 x half> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) + ret <4 x half> %0 +} + +declare <4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x half>, <4 x i16>, i16, iXLen) + +define void @test_sf_vc_fvvx_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x half>, <8 x i16>, i16, iXLen) + +define <8 x half> @test_sf_vc_v_fvvx_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <8 x half> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) + ret <8 x half> %0 +} + +declare <8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x half>, <8 x i16>, i16, iXLen) + +define void @test_sf_vc_fvvx_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x half>, <16 x i16>, i16, iXLen) + +define <16 x half> @test_sf_vc_v_fvvx_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 
1, <1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <16 x half> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) + ret <16 x half> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, <1 x i16>, <1 x i16>, half, iXLen) +declare <16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x half>, <16 x i16>, i16, iXLen) -define void @test_sf_vc_fvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: +define void @test_sf_vc_fvvx_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, <2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, <32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, <2 x i16>, <2 x i16>, half, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, <32 x half>, <32 x i16>, i16, iXLen) -define void @test_sf_vc_fvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: +define <32 x half> @test_sf_vc_v_fvvx_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, <4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, <32 x half> %vd, <32 x i16> %vs2, i16 %rs1, iXLen %vl) + ret <32 x half> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, <4 x i16>, <4 x i16>, half, iXLen) +declare <32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, <32 x half>, <32 x i16>, i16, iXLen) -define void @test_sf_vc_fvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: +define void @test_sf_vc_fvvx_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, <8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, <8 x i16>, <8 x i16>, half, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x float>, <1 x i32>, i32, iXLen) 
-define void @test_sf_vc_fvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: +define <1 x float> @test_sf_vc_v_fvvx_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, <16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <1 x float> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) + ret <1 x float> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, <16 x i16>, <16 x i16>, half, iXLen) +declare <1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x float>, <1 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: +define void @test_sf_vc_fvvx_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, <32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, <32 x i16>, <32 x i16>, half, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x float>, <2 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: +define <2 x float> @test_sf_vc_v_fvvx_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, <1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) - ret void + %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <2 x float> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) + ret <2 x float> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen, <1 x i32>, <1 x i32>, float, iXLen) +declare <2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x float>, <2 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: +define void @test_sf_vc_fvvx_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, <2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen, <2 x i32>, <2 x i32>, float, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x float>, <4 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: +define <4 x float> @test_sf_vc_v_fvvx_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, <4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) - ret void + %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <4 x float> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) + ret <4 x float> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen, <4 x i32>, <4 x i32>, float, iXLen) +declare <4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x float>, <4 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: +define void @test_sf_vc_fvvx_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, <8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <8 x float> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen, <8 x i32>, <8 x i32>, float, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x float>, <8 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: +define <8 x float> @test_sf_vc_v_fvvx_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, <16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) - ret void + %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <8 x float> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) + ret <8 x float> %0 } -declare void 
@llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen, <16 x i32>, <16 x i32>, float, iXLen) +declare <8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x float>, <8 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: +define void @test_sf_vc_fvvx_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, <1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, <16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen, <1 x i64>, <1 x i64>, double, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, <16 x float>, <16 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: +define <16 x float> @test_sf_vc_v_fvvx_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, <2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) - ret void + %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, <16 x float> %vd, <16 x i32> %vs2, i32 %rs1, iXLen %vl) + ret <16 x float> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen, <2 x i64>, <2 x i64>, double, iXLen) +declare <16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, <16 x float>, <16 x i32>, i32, iXLen) -define void @test_sf_vc_fvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e64m4: +define void @test_sf_vc_fvvi_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, <4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <1 x half> %vd, <1 x i16> %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen, <4 x i64>, <4 x i64>, double, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen, iXLen) -define void @test_sf_vc_fvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: +define <1 x half> @test_sf_vc_fv_fvvi_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_fv_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, <8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) - ret void + %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <1 x half> %vd, <1 x i16> %vs2, iXLen 3, iXLen %vl) + ret <1 x half> %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen, <8 x i64>, <8 x i64>, double, iXLen) +declare <1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x half>, <1 x i16>, iXLen, iXLen) -define <1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: +define void @test_sf_vc_fvvi_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) - ret <1 x i16> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <2 x half> %vd, <2 x i16> %vs2, iXLen 3, iXLen %vl) + ret void } -declare <1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, <1 x i16>, half, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen, iXLen) -define <2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: +define <2 x half> @test_sf_vc_fv_fvvi_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) - ret <2 x i16> %0 + %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <2 x half> %vd, <2 x i16> %vs2, iXLen 3, iXLen %vl) + ret <2 x half> %0 } -declare <2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, <2 x i16>, half, iXLen) +declare <2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x half>, <2 x i16>, iXLen, iXLen) -define <4 x i16> @test_sf_vc_v_fvv_se_e16m1(<4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: +define void @test_sf_vc_fvvi_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) - ret <4 x i16> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <4 x half> %vd, 
<4 x i16> %vs2, iXLen 3, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen, iXLen) + +define <4 x half> @test_sf_vc_fv_fvvi_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <4 x half> %vd, <4 x i16> %vs2, iXLen 3, iXLen %vl) + ret <4 x half> %0 } -declare <4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, <4 x i16>, half, iXLen) +declare <4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x half>, <4 x i16>, iXLen, iXLen) -define <8 x i16> @test_sf_vc_v_fvv_se_e16m2(<8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2: +define void @test_sf_vc_fvvi_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) - ret <8 x i16> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <8 x half> %vd, <8 x i16> %vs2, iXLen 3, iXLen %vl) + ret void } -declare <8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, <8 x i16>, half, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen, iXLen) -define <16 x i16> @test_sf_vc_v_fvv_se_e16m4(<16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4: +define <8 x half> @test_sf_vc_fv_fvvi_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <8 x half> %vd, <8 x i16> %vs2, iXLen 3, iXLen %vl) + ret <8 x half> %0 +} + +declare <8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x half>, <8 x i16>, iXLen, iXLen) + +define void @test_sf_vc_fvvi_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) - ret <16 x i16> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <16 x half> %vd, <16 x i16> %vs2, iXLen 3, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen, iXLen) + +define <16 x half> @test_sf_vc_fv_fvvi_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, 
ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <16 x half> %vd, <16 x i16> %vs2, iXLen 3, iXLen %vl) + ret <16 x half> %0 } -declare <16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, <16 x i16>, half, iXLen) +declare <16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x half>, <16 x i16>, iXLen, iXLen) -define <32 x i16> @test_sf_vc_v_fvv_se_e16m8(<32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8: +define void @test_sf_vc_fvvi_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) - ret <32 x i16> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, <32 x half> %vd, <32 x i16> %vs2, iXLen 3, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen, iXLen) + +define <32 x half> @test_sf_vc_fv_fvvi_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, <32 x half> %vd, <32 x i16> %vs2, iXLen 3, iXLen %vl) + ret <32 x half> %0 } -declare <32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, <32 x i16>, half, iXLen) +declare <32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <32 x half>, <32 x i16>, iXLen, iXLen) -define <1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2: +define void @test_sf_vc_fvvi_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) - ret <1 x i32> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <1 x float> %vd, <1 x i32> %vs2, iXLen 3, iXLen %vl) + ret void } -declare <1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, <1 x i32>, float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen, iXLen) -define <2 x i32> @test_sf_vc_v_fvv_se_e32m1(<2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1: +define <1 x float> @test_sf_vc_fv_fvvi_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.v.ivv 3, 
v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) - ret <2 x i32> %0 + %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <1 x float> %vd, <1 x i32> %vs2, iXLen 3, iXLen %vl) + ret <1 x float> %0 } -declare <2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, <2 x i32>, float, iXLen) +declare <1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x float>, <1 x i32>, iXLen, iXLen) -define <4 x i32> @test_sf_vc_v_fvv_se_e32m2(<4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2: +define void @test_sf_vc_fvvi_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <2 x float> %vd, <2 x i32> %vs2, iXLen 3, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen, iXLen) + +define <2 x float> @test_sf_vc_fv_fvvi_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <2 x float> %vd, <2 x i32> %vs2, iXLen 3, iXLen %vl) + ret <2 x float> %0 +} + +declare <2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x float>, <2 x i32>, iXLen, iXLen) + +define void @test_sf_vc_fvvi_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) - ret <4 x i32> %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <4 x float> %vd, <4 x i32> %vs2, iXLen 3, iXLen %vl) + ret void } -declare <4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, <4 x i32>, float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen, iXLen) -define <8 x i32> @test_sf_vc_v_fvv_se_e32m4(<8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4: +define <4 x float> @test_sf_vc_fv_fvvi_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) - ret <8 x i32> %0 + %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <4 x float> %vd, <4 x i32> 
%vs2, iXLen 3, iXLen %vl)
+  ret <4 x float> %0
 }

-declare <8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, <8 x i32>, float, iXLen)
+declare <4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x float>, <4 x i32>, iXLen, iXLen)

-define <16 x i32> @test_sf_vc_v_fvv_se_e32m8(<16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
+define void @test_sf_vc_fvvi_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <16 x i32> %0
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <8 x float> %vd, <8 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare <16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, <16 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen, iXLen)

-define <1 x i64> @test_sf_vc_v_fvv_se_e64m1(<1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
+define <8 x float> @test_sf_vc_fv_fvvi_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <1 x i64> %0
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <8 x float> %vd, <8 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret <8 x float> %0
 }

-declare <1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, <1 x i64>, double, iXLen)
+declare <8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x float>, <8 x i32>, iXLen, iXLen)

-define <2 x i64> @test_sf_vc_v_fvv_se_e64m2(<2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
+define void @test_sf_vc_fvvi_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvi_se_e32m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <2 x i64> %0
+  tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, <16 x float> %vd, <16 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare <2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, <2 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen, iXLen)

-define <4 x i64> @test_sf_vc_v_fvv_se_e64m4(<4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
+define <16 x float> @test_sf_vc_fv_fvvi_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <4 x i64> %0
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, <16 x float> %vd, <16 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret <16 x float> %0
 }

-declare <4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, <4 x i64>, double, iXLen)
+declare <16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <16 x float>, <16 x i32>, iXLen, iXLen)

-define <8 x i64> @test_sf_vc_v_fvv_se_e64m8(<8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
+define void @test_sf_vc_fvvf_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <8 x i64> %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, <8 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x half>, <1 x i16>, half, iXLen)

-define <1 x i16> @test_sf_vc_v_fvv_e16mf4(<1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4:
+define <1 x half> @test_sf_vc_fv_fvvf_se_e16mf4(<1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <1 x i16> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <1 x i16> %0
+  %0 = tail call <1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <1 x half> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <1 x half> %0
+}
+
+declare <1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x half>, <1 x i16>, half %rs1, iXLen)
+
+define void @test_sf_vc_fvvf_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, <1 x i16>, <1 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x half>, <2 x i16>, half, iXLen)

-define <2 x i16> @test_sf_vc_v_fvv_e16mf2(<2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
+define <2 x half> @test_sf_vc_fv_fvvf_se_e16mf2(<2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <2 x i16> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <2 x i16> %0
+  %0 = tail call <2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <2 x half> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <2 x half> %0
+}
+
+declare <2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x half>, <2 x i16>, half %rs1, iXLen)
+
+define void @test_sf_vc_fvvf_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <4 x half> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, <2 x i16>, <2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x half>, <4 x i16>, half, iXLen)

-define <4 x i16> @test_sf_vc_v_fvv_e16m1(<4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
+define <4 x half> @test_sf_vc_fv_fvvf_se_e16m1(<4 x half> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <4 x i16> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <4 x i16> %0
+  %0 = tail call <4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <4 x half> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <4 x half> %0
+}
+
+declare <4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x half>, <4 x i16>, half %rs1, iXLen)
+
+define void @test_sf_vc_fvvf_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, <4 x i16>, <4 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x half>, <8 x i16>, half, iXLen)

-define <8 x i16> @test_sf_vc_v_fvv_e16m2(<8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
+define <8 x half> @test_sf_vc_fv_fvvf_se_e16m2(<8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <8 x i16> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <8 x i16> %0
+  %0 = tail call <8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <8 x half> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <8 x half> %0
+}
+
+declare <8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x half>, <8 x i16>, half %rs1, iXLen)
+
+define void @test_sf_vc_fvvf_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, <8 x i16>, <8 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x half>, <16 x i16>, half, iXLen)

-define <16 x i16> @test_sf_vc_v_fvv_e16m4(<16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
+define <16 x half> @test_sf_vc_fv_fvvf_se_e16m4(<16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <16 x i16> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <16 x i16> %0
+  %0 = tail call <16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <16 x half> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <16 x half> %0
+}
+
+declare <16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x half>, <16 x i16>, half %rs1, iXLen)
+
+define void @test_sf_vc_fvvf_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, <32 x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, <16 x i16>, <16 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, <32 x half>, <32 x i16>, half, iXLen)

-define <32 x i16> @test_sf_vc_v_fvv_e16m8(<32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
+define <32 x half> @test_sf_vc_fv_fvvf_se_e16m8(<32 x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <32 x i16> %vd, <32 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <32 x i16> %0
+  %0 = tail call <32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, <32 x half> %vd, <32 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <32 x half> %0
 }

-declare <32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, <32 x i16>, <32 x i16>, half, iXLen)
+declare <32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, <32 x half>, <32 x i16>, half %rs1, iXLen)

-define <1 x i32> @test_sf_vc_v_fvv_e32mf2(<1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
+define void @test_sf_vc_fvvf_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <1 x i32> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <1 x i32> %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen, <1 x i32>, <1 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x float>, <1 x i32>, float, iXLen)

-define <2 x i32> @test_sf_vc_v_fvv_e32m1(<2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m1:
+define <1 x float> @test_sf_vc_fv_fvvf_se_e32mf2(<1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <2 x i32> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <2 x i32> %0
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <1 x float> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl)
+  ret <1 x float> %0
 }

-declare <2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen, <2 x i32>, <2 x i32>, float, iXLen)
+declare <1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x float>, <1 x i32>, float %rs1, iXLen)

-define <4 x i32> @test_sf_vc_v_fvv_e32m2(<4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m2:
+define void @test_sf_vc_fvvf_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <4 x i32> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <4 x i32> %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen, <4 x i32>, <4 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x float>, <2 x i32>, float, iXLen)

-define <8 x i32> @test_sf_vc_v_fvv_e32m4(<8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m4:
+define <2 x float> @test_sf_vc_fv_fvvf_se_e32m1(<2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <8 x i32> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <8 x i32> %0
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <2 x float> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl)
+  ret <2 x float> %0
 }

-declare <8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen, <8 x i32>, <8 x i32>, float, iXLen)
+declare <2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x float>, <2 x i32>, float %rs1, iXLen)

-define <16 x i32> @test_sf_vc_v_fvv_e32m8(<16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m8:
+define void @test_sf_vc_fvvf_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <16 x i32> %vd, <16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <16 x i32> %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen, <16 x i32>, <16 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x float>, <4 x i32>, float, iXLen)

-define <1 x i64> @test_sf_vc_v_fvv_e64m1(<1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e64m1:
+define <4 x float> @test_sf_vc_fv_fvvf_se_e32m2(<4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <1 x i64> %vd, <1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <1 x i64> %0
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <4 x float> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl)
+  ret <4 x float> %0
 }

-declare <1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen, <1 x i64>, <1 x i64>, double, iXLen)
+declare <4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x float>, <4 x i32>, float %rs1, iXLen)

-define <2 x i64> @test_sf_vc_v_fvv_e64m2(<2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e64m2:
+define void @test_sf_vc_fvvf_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <2 x i64> %vd, <2 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <2 x i64> %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen, <2 x i64>, <2 x i64>, double, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x float>, <8 x i32>, float, iXLen)

-define <4 x i64> @test_sf_vc_v_fvv_e64m4(<4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e64m4:
+define <8 x float> @test_sf_vc_fv_fvvf_se_e32m4(<8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <4 x i64> %vd, <4 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <4 x i64> %0
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <8 x float> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl)
+  ret <8 x float> %0
 }

-declare <4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen, <4 x i64>, <4 x i64>, double, iXLen)
+declare <8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x float>, <8 x i32>, float %rs1, iXLen)

-define <8 x i64> @test_sf_vc_v_fvv_e64m8(<8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e64m8:
+define void @test_sf_vc_fvvf_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, <16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, <16 x float>, <16 x i32>, float, iXLen)
+
+define <16 x float> @test_sf_vc_fv_fvvf_se_e32m8(<16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <8 x i64> %vd, <8 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <8 x i64> %0
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, <16 x float> %vd, <16 x i32> %vs2, float %rs1, iXLen %vl)
+  ret <16 x float> %0
 }

-declare <8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, <8 x i64>, <8 x i64>, double, iXLen)
+declare <16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, <16 x float>, <16 x i32>, float %rs1, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll
index 86257ead512c2..ec34791e6572a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-xsfvcp-xvw.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+xsfvcp \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+xsfvcp \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
 ; RUN:   -verify-machineinstrs | FileCheck %s

 define void @test_sf_vc_vvw_se_e8mf8(<1 x i16> %vd, <1 x i8> %vs2, <1 x i8> %vs1, iXLen %vl) {
@@ -1759,353 +1759,939 @@ entry:
 declare <8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <8 x i64>, <8 x i32>, iXLen, iXLen)

-define void @test_sf_vc_fvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4:
+define void @test_sf_vc_fwvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen 1, <1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen, <1 x i32>, <1 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen)
+
+define <1 x float> @test_sf_vc_fw_fwvvv_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, <1 x i16> %vs1, iXLen %vl)
+  ret <1 x float> %0
+}
+
+declare <1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <1 x float>, <1 x i16>, <1 x i16>, iXLen)

-define void @test_sf_vc_fvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2:
+define void @test_sf_vc_fwvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen 1, <2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen, <2 x i32>, <2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen)

-define void @test_sf_vc_fvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16m1:
+define <2 x float> @test_sf_vc_fw_fwvvv_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, <2 x i16> %vs1, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <2 x float>, <2 x i16>, <2 x i16>, iXLen)
+
+define void @test_sf_vc_fwvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen 1, <4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen, <4 x i32>, <4 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen)

-define void @test_sf_vc_fvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16m2:
+define <4 x float> @test_sf_vc_fw_fwvvv_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, <4 x i16> %vs1, iXLen %vl)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <4 x float>, <4 x i16>, <4 x i16>, iXLen)
+
+define void @test_sf_vc_fwvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen 1, <8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen, <8 x i32>, <8 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen)
+
+define <8 x float> @test_sf_vc_fw_fwvvv_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, <8 x i16> %vs1, iXLen %vl)
+  ret <8 x float> %0
+}
+
+declare <8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <8 x float>, <8 x i16>, <8 x i16>, iXLen)

-define void @test_sf_vc_fvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16m4:
+define void @test_sf_vc_fwvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen 1, <16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen, <16 x i32>, <16 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen)

-define void @test_sf_vc_fvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2:
+define <16 x float> @test_sf_vc_fw_fwvvv_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, <16 x i16> %vs1, iXLen %vl)
+  ret <16 x float> %0
+}
+
+declare <16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <16 x float>, <16 x i16>, <16 x i16>, iXLen)
+
+define void @test_sf_vc_fwvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen 1, <1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen, <1 x i64>, <1 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen)
+
+define <1 x double> @test_sf_vc_fw_fwvvv_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, <1 x i32> %vs1, iXLen %vl)
+  ret <1 x double> %0
+}
+
+declare <1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <1 x double>, <1 x i32>, <1 x i32>, iXLen)

-define void @test_sf_vc_fvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32m1:
+define void @test_sf_vc_fwvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen 1, <2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen, <2 x i64>, <2 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen)

-define void @test_sf_vc_fvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32m2:
+define <2 x double> @test_sf_vc_fw_fwvvv_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, <2 x i32> %vs1, iXLen %vl)
+  ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <2 x double>, <2 x i32>, <2 x i32>, iXLen)
+
+define void @test_sf_vc_fwvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen 1, <4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen, <4 x i64>, <4 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen)
+
+define <4 x double> @test_sf_vc_fw_fwvvv_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, <4 x i32> %vs1, iXLen %vl)
+  ret <4 x double> %0
+}
+
+declare <4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <4 x double>, <4 x i32>, <4 x i32>, iXLen)

-define void @test_sf_vc_fvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32m4:
+define void @test_sf_vc_fwvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen)
+
+define <8 x double> @test_sf_vc_fw_fwvvv_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, <8 x i32> %vs1, iXLen %vl)
+  ret <8 x double> %0
+}
+
+declare <8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <8 x double>, <8 x i32>, <8 x i32>, iXLen)
+
+define void @test_sf_vc_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen)
+
+define <1 x float> @test_sf_vc_w_fwvx_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <1 x float> %0
+}
+
+declare <1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, <1 x float>, <1 x i16>, i16, iXLen)
+
+define void @test_sf_vc_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen)
+
+define <2 x float> @test_sf_vc_w_fwvx_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, <2 x float>, <2 x i16>, i16, iXLen)
+
+define void @test_sf_vc_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen)
+
+define <4 x float> @test_sf_vc_w_fwvx_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, <4 x float>, <4 x i16>, i16, iXLen)
+
+define void @test_sf_vc_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen)
+
+define <8 x float> @test_sf_vc_w_fwvx_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <8 x float> %0
+}
+
+declare <8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, <8 x float>, <8 x i16>, i16, iXLen)
+
+define void @test_sf_vc_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen)
+
+define <16 x float> @test_sf_vc_w_fwvx_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, i16 %rs1, iXLen %vl)
+  ret <16 x float> %0
+}
+
+declare <16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, <16 x float>, <16 x i16>, i16, iXLen)
+
+define void @test_sf_vc_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen)
+
+define <1 x double> @test_sf_vc_w_fwvx_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <1 x double> %0
+}
+
+declare <1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, <1 x double>, <1 x i32>, i32, iXLen)
+
+define void @test_sf_vc_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, <2 x double>, <2 x i32>, i32, iXLen)
+
+define <2 x double> @test_sf_vc_w_fwvx_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, <2 x double>, <2 x i32>, i32, iXLen)
+
+define void @test_sf_vc_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
 ; CHECK-NEXT: ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen 1, <8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen, <8 x i64>, <8 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen)
+
+define <4 x double> @test_sf_vc_w_fwvx_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <4 x double> %0
+}
+
+declare <4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, <4 x double>, <4 x i32>, i32, iXLen)

-define <1 x i32> @test_sf_vc_v_fvw_se_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4:
+define void @test_sf_vc_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen)
+
+define <8 x double> @test_sf_vc_w_fwvx_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, i32 %rs1, iXLen %vl)
+  ret <8 x double> %0
+}
+
+declare <8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, <8 x double>, <8 x i32>, i32, iXLen)
+
+define void @test_sf_vc_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen)
+
+define <1 x float> @test_sf_vc_fw_fwvi_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <1 x i32> %0
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <1 x float> %vd, <1 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret <1 x float> %0
 }

-declare <1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <1 x i32>, <1 x i16>, half, iXLen)
+declare <1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <1 x float>, <1 x i16>, iXLen, iXLen)

-define <2 x i32> @test_sf_vc_v_fvw_se_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2:
+define void @test_sf_vc_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen)
+
+define <2 x float> @test_sf_vc_fw_fwvi_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <2 x i32> %0
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <2 x float> %vd, <2 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <2 x float>, <2 x i16>, iXLen, iXLen)
+
+define void @test_sf_vc_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare <2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <2 x i32>, <2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen)

-define <4 x i32> @test_sf_vc_v_fvw_se_e16m1(<4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1:
+define <4 x float> @test_sf_vc_fw_fwvi_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <4 x i32> %0
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <4 x float> %vd, <4 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret <4 x float> %0
 }

-declare <4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <4 x i32>, <4 x i16>, half, iXLen)
+declare <4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <4 x float>, <4 x i16>, iXLen, iXLen)

-define <8 x i32> @test_sf_vc_v_fvw_se_e16m2(<8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2:
+define void @test_sf_vc_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen)
+
+define <8 x float> @test_sf_vc_fw_fwvi_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <8 x i32> %0
+  %0 = tail call <8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <8 x float> %vd, <8 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret <8 x float> %0
 }

-declare <8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <8 x i32>, <8 x i16>, half, iXLen)
+declare <8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <8 x float>, <8 x i16>, iXLen, iXLen)

-define <16 x i32> @test_sf_vc_v_fvw_se_e16m4(<16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4:
+define void @test_sf_vc_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen)
+
+define <16 x float> @test_sf_vc_fw_fwvi_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <16 x i32> %0
+  %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <16 x float> %vd, <16 x i16> %vs2, iXLen 3, iXLen %vl)
+  ret <16 x float> %0
 }

-declare <16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <16 x i32>, <16 x i16>, half, iXLen)
+declare <16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <16 x float>, <16 x i16>, iXLen, iXLen)

-define <1 x i64> @test_sf_vc_v_fvw_se_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2:
+define void @test_sf_vc_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen)
+
+define <1 x double> @test_sf_vc_fw_fwvi_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <1 x i64> %0
+  %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <1 x double> %vd, <1 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret <1 x double> %0
+}
+
+declare <1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <1 x double>, <1 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare <1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <1 x i64>, <1 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen)

-define <2 x i64> @test_sf_vc_v_fvw_se_e32m1(<2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1:
+define <2 x double> @test_sf_vc_fw_fwvi_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <2 x i64> %0
+  %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <2 x double> %vd, <2 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <2 x double>, <2 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare <2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <2 x i64>, <2 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen)

-define <4 x i64> @test_sf_vc_v_fvw_se_e32m2(<4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2:
+define <4 x double> @test_sf_vc_fw_fwvi_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <4 x i64> %0
+  %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <4 x double> %vd, <4 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret <4 x double> %0
+}
+
+declare <4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <4 x double>, <4 x i32>, iXLen, iXLen)
+
+define void @test_sf_vc_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 3
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare <4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <4 x i64>, <4 x i32>, float, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen)

-define <8 x i64> @test_sf_vc_v_fvw_se_e32m4(<8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4:
+define <8 x double> @test_sf_vc_fw_fwvi_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 3
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <8 x i64> %0
+  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <8 x double> %vd, <8 x i32> %vs2, iXLen 3, iXLen %vl)
+  ret <8 x double> %0
 }

-declare <8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <8 x i64>, <8 x i32>, float, iXLen)
+declare <8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <8 x double>, <8 x i32>, iXLen, iXLen)

-define <1 x i32> @test_sf_vc_v_fvw_e16mf4(<1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4:
+define void @test_sf_vc_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen 1, <1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen)
+
+define <1 x float> @test_sf_vc_fw_fwvf_se_e32mf2(<1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <1 x i32> %vd, <1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <1 x i32> %0
+  %0 = tail call <1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <1 x float> %vd, <1 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <1 x float> %0
 }

-declare <1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <1 x i32>, <1 x i16>, half, iXLen)
+declare <1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, <1 x float>, <1 x i16>, half, iXLen)

-define <2 x i32> @test_sf_vc_v_fvw_e16mf2(<2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2:
+define void @test_sf_vc_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen 1, <2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen)
+
+define <2 x float> @test_sf_vc_fw_fwvf_se_e32m1(<2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <2 x i32> %vd, <2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <2 x i32> %0
+  %0 = tail call <2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <2 x float> %vd, <2 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, <2 x float>, <2 x i16>, half, iXLen)
+
+define void @test_sf_vc_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen 1, <4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <2 x i32>, <2 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen)

-define <4 x i32> @test_sf_vc_v_fvw_e16m1(<4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16m1:
+define <4 x float> @test_sf_vc_fw_fwvf_se_e32m2(<4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <4 x i32> %vd, <4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <4 x i32> %0
+  %0 = tail call <4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <4 x float> %vd, <4 x i16> %vs2, half %rs1, iXLen %vl)
+  ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, <4 x float>, <4 x i16>, half, iXLen)
+
+define void @test_sf_vc_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen 1, <8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <4 x i32>, <4 x i16>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen)

-define <8 x i32> @test_sf_vc_v_fvw_e16m2(<8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16m2:
+define <8 x float> @test_sf_vc_fw_fwvf_se_e32m4(<8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call <8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <8 x i32> %vd, <8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <8 x i32> %0
+  %0 = tail call <8 x float>
@llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <8 x float> %vd, <8 x i16> %vs2, half %rs1, iXLen %vl) + ret <8 x float> %0 +} + +declare <8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, <8 x float>, <8 x i16>, half, iXLen) + +define void @test_sf_vc_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen 1, <16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) + ret void } -declare <8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <8 x i32>, <8 x i16>, half, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen) -define <16 x i32> @test_sf_vc_v_fvw_e16m4(<16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvw_e16m4: +define <16 x float> @test_sf_vc_fw_fwvf_se_e32m8(<16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <16 x i32> %vd, <16 x i16> %vs2, half %fs1, iXLen %vl) - ret <16 x i32> %0 + %0 = tail call <16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <16 x float> %vd, <16 x i16> %vs2, half %rs1, iXLen %vl) + ret <16 x float> %0 } -declare <16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <16 x i32>, <16 x i16>, half, iXLen) +declare <16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, <16 x float>, <16 x i16>, half, iXLen) -define <1 x i64> @test_sf_vc_v_fvw_e32mf2(<1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2: +define void @test_sf_vc_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen 1, <1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, <1 x double>, <1 x i32>, float, iXLen) + +define <1 x double> @test_sf_vc_fw_fwvf_se_e64m1(<1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <1 x i64> %vd, <1 x i32> %vs2, float %fs1, iXLen %vl) - ret <1 x i64> %0 + %0 = tail call <1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <1 x double> %vd, <1 x i32> %vs2, float %rs1, iXLen %vl) + ret <1 x double> %0 } -declare <1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <1 x i64>, <1 x i32>, float, iXLen) +declare <1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, <1 x 
double>, <1 x i32>, float, iXLen) -define <2 x i64> @test_sf_vc_v_fvw_e32m1(<2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvw_e32m1: +define void @test_sf_vc_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen 1, <2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen) + +define <2 x double> @test_sf_vc_fw_fwvf_se_e64m2(<2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <2 x i64> %vd, <2 x i32> %vs2, float %fs1, iXLen %vl) - ret <2 x i64> %0 + %0 = tail call <2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <2 x double> %vd, <2 x i32> %vs2, float %rs1, iXLen %vl) + ret <2 x double> %0 +} + +declare <2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, <2 x double>, <2 x i32>, float, iXLen) + +define void @test_sf_vc_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen 1, <4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) + ret void } -declare <2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <2 x i64>, <2 x i32>, float, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen) -define <4 x i64> @test_sf_vc_v_fvw_e32m2(<4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvw_e32m2: +define <4 x double> @test_sf_vc_fw_fwvf_se_e64m4(<4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call <4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <4 x i64> %vd, <4 x i32> %vs2, float %fs1, iXLen %vl) - ret <4 x i64> %0 + %0 = tail call <4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <4 x double> %vd, <4 x i32> %vs2, float %rs1, iXLen %vl) + ret <4 x double> %0 } -declare <4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <4 x i64>, <4 x i32>, float, iXLen) +declare <4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, <4 x double>, <4 x i32>, float, iXLen) -define <8 x i64> @test_sf_vc_v_fvw_e32m4(<8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvw_e32m4: +define void @test_sf_vc_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, 
m2, ta, ma
+; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen 1, <8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl)
+  ret void
+}
+
+declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen)
+
+define <8 x double> @test_sf_vc_fw_fwvf_se_e64m8(<8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <8 x i64> %vd, <8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <8 x i64> %0
+  %0 = tail call <8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <8 x double> %vd, <8 x i32> %vs2, float %rs1, iXLen %vl)
+  ret <8 x double> %0
 }
 
-declare <8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <8 x i64>, <8 x i32>, float, iXLen)
+declare <8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, <8 x double>, <8 x i32>, float, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
index 59124ed817941..e304b19e71c8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-x.ll
@@ -1953,3 +1953,290 @@ entry:
 }
 
 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.i.nxv8f64.iXLen.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x half> @test_sf_vc_fv_x_se_e16mf4(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.x.se.nxv1f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 2 x half> @test_sf_vc_fv_x_se_e16mf2(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.x.se.nxv2f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 4 x half> @test_sf_vc_fv_x_se_e16m1(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.x.se.nxv4f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 8 x half> @test_sf_vc_fv_x_se_e16m2(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.x.se.nxv8f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 16 x half> @test_sf_vc_fv_x_se_e16m4(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.x.se.nxv16f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 32 x half> @test_sf_vc_fv_x_se_e16m8(i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen 3, iXLen 4, i16 %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.x.se.nxv32f16.i16.iXLen(iXLen, iXLen, i16, iXLen)
+
+define <vscale x 1 x float> @test_sf_vc_fv_x_se_e32mf2(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.x.se.nxv1f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 2 x float> @test_sf_vc_fv_x_se_e32m1(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.x.se.nxv2f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 4 x float> @test_sf_vc_fv_x_se_e32m2(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.x.se.nxv4f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 8 x float> @test_sf_vc_fv_x_se_e32m4(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.x.se.nxv8f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 16 x float> @test_sf_vc_fv_x_se_e32m8(i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_x_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.x 3, 4, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen 3, iXLen 4, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.x.se.nxv16f32.i32.iXLen(iXLen, iXLen, i32, iXLen)
+
+define <vscale x 1 x half> @test_sf_vc_fv_i_se_e16mf4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 1 x half> %0
+}
+
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.i.se.nxv1f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x half> @test_sf_vc_fv_i_se_e16mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 2 x half> %0
+}
+
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.i.se.nxv2f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x half> @test_sf_vc_fv_i_se_e16m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 4 x half> %0
+}
+
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.i.se.nxv4f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x half> @test_sf_vc_fv_i_se_e16m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 8 x half> %0
+}
+
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.i.se.nxv8f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x half> @test_sf_vc_fv_i_se_e16m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 16 x half> %0
+}
+
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.i.se.nxv16f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 32 x half> @test_sf_vc_fv_i_se_e16m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e16m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.i.se.nxv32f16.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 1 x float> @test_sf_vc_fv_i_se_e32mf2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 1 x float> %0
+}
+
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.i.se.nxv1f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 2 x float> @test_sf_vc_fv_i_se_e32m1(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 2 x float> %0
+}
+
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.i.se.nxv2f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 4 x float> @test_sf_vc_fv_i_se_e32m2(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 4 x float> %0
+}
+
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.i.se.nxv4f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 8 x float> @test_sf_vc_fv_i_se_e32m4(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 8 x float> %0
+}
+
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.i.se.nxv8f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
+define <vscale x 16 x float> @test_sf_vc_fv_i_se_e32m8(iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_i_se_e32m8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.i 3, 8, v8, 4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 8, iXLen 4, iXLen %vl)
+  ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.i.se.nxv16f32.iXLen.iXLen(iXLen, iXLen, iXLen, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
index 91ba424212329..354ca3ad973f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xv.ll
@@ -2422,2771 +2422,1251 @@ entry:
 
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, iXLen, iXLen)
 
-define void @test_sf_vc_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16mf4:
+define void @test_sf_vc_fvv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, half, iXLen)
-
-define void @test_sf_vc_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, half, iXLen)
-
-define void @test_sf_vc_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, half, iXLen)
-
-define void @test_sf_vc_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, half, iXLen)
-
-define void @test_sf_vc_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, half, iXLen)
-
-define void @test_sf_vc_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fv_se_e16m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
-; 
CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, iXLen 31, %vs2, half %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen, iXLen, , half, iXLen) - -define void @test_sf_vc_fv_se_e32mf2( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e32mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, iXLen 31, %vs2, float %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen, iXLen, , float, iXLen) - -define void @test_sf_vc_fv_se_e32m1( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e32m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, iXLen 31, %vs2, float %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen, iXLen, , float, iXLen) - -define void @test_sf_vc_fv_se_e32m2( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e32m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, iXLen 31, %vs2, float %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen, iXLen, , float, iXLen) - -define void @test_sf_vc_fv_se_e32m4( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, iXLen 31, %vs2, float %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen, iXLen, , float, iXLen) - -define void @test_sf_vc_fv_se_e32m8( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, iXLen 31, %vs2, float %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen, iXLen, , float, iXLen) - -define void @test_sf_vc_fv_se_e64m1( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e64m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen, iXLen, , double, iXLen) - -define void @test_sf_vc_fv_se_e64m2( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e64m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) - ret void -} - -declare void 
@llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen, iXLen, , double, iXLen) - -define void @test_sf_vc_fv_se_e64m4( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e64m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen, iXLen, , double, iXLen) - -define void @test_sf_vc_fv_se_e64m8( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_fv_se_e64m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, iXLen 31, %vs2, double %fs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen, iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_se_e16mf4( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_se_e16mf2( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_se_e16m1( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_se_e16m2( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_se_e16m4( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_se_e16m8( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret 
-entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_se_e32mf2( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_se_e32m1( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_se_e32m2( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_se_e32m4( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_se_e32m8( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_se_e64m1( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_se_e64m2( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_se_e64m4( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_se_e64m8( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_e16mf4( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_e16mf2( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_e16m1( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_e16m2( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_e16m4( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e16m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_e16m8( %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e16m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen 1, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen, , half, iXLen) - -define @test_sf_vc_v_fv_e32mf2( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e32mf2: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_e32m1( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e32m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_e32m2( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e32m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_e32m4( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_e32m8( %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen 1, %vs2, float %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen, , float, iXLen) - -define @test_sf_vc_v_fv_e64m1( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_e64m2( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_e64m4( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define @test_sf_vc_v_fv_e64m8( %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fv_e64m8: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, , double, iXLen) - -define void @test_f_sf_vc_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e16m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e16m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e32mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - 
-declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e32m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e32m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e64m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e64m2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e64m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e64m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, iXLen, , , iXLen) - -define void @test_f_sf_vc_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vv_se_e64m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e64, m8, ta, ma -; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e16mf4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e16mf2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e16m1( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e16m2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e16m4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e16m8( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e32mf2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e32m1( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: 
test_f_sf_vc_v_vv_se_e32m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e32m2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e32m4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e32m8( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e64m1( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e64m2( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e64m4( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, , , iXLen) - -define @test_f_sf_vc_v_vv_se_e64m8( %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, , , iXLen) - -define 
-define <vscale x 1 x half> @test_f_sf_vc_v_vv_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
-  ret <vscale x 1 x half> %0
-}
-
-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
-
-define <vscale x 2 x half> @test_f_sf_vc_v_vv_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
-  ret <vscale x 2 x half> %0
-}
-
-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
-
-define <vscale x 4 x half> @test_f_sf_vc_v_vv_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
-  ret <vscale x 4 x half> %0
-}
-
-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
-
-define <vscale x 8 x half> @test_f_sf_vc_v_vv_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
-  ret <vscale x 8 x half> %0
-}
-
-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
-
-define <vscale x 16 x half> @test_f_sf_vc_v_vv_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
-  ret <vscale x 16 x half> %0
-}
-
-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
-
-define <vscale x 32 x half> @test_f_sf_vc_v_vv_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e16m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
-  ret <vscale x 32 x half> %0
-}
-
-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
-
-define <vscale x 1 x float> @test_f_sf_vc_v_vv_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
-  ret <vscale x 1 x float> %0
-}
-
-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
-
-define <vscale x 2 x float> @test_f_sf_vc_v_vv_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
-  ret <vscale x 2 x float> %0
-}
-
-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
-
-define <vscale x 4 x float> @test_f_sf_vc_v_vv_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
-  ret <vscale x 4 x float> %0
-}
-
-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
-
-define <vscale x 8 x float> @test_f_sf_vc_v_vv_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
-  ret <vscale x 8 x float> %0
-}
-
-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
-
-define <vscale x 16 x float> @test_f_sf_vc_v_vv_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e32m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
-  ret <vscale x 16 x float> %0
-}
-
-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
-
-define <vscale x 1 x double> @test_f_sf_vc_v_vv_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e64m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
-  ret <vscale x 1 x double> %0
-}
-
-declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
-
-define <vscale x 2 x double> @test_f_sf_vc_v_vv_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e64m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
-  ret <vscale x 2 x double> %0
-}
-
-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
-
-define <vscale x 4 x double> @test_f_sf_vc_v_vv_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e64m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
-  ret <vscale x 4 x double> %0
-}
-
-declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
-
-define <vscale x 8 x double> @test_f_sf_vc_v_vv_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vv_e64m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.vv 3, v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
-  ret <vscale x 8 x double> %0
-}
-
-declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
-
-define void @test_f_sf_vc_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen, iXLen, <vscale x 1 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen, iXLen, <vscale x 2 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen, iXLen, <vscale x 4 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen, iXLen, <vscale x 8 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen, iXLen, <vscale x 16 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e16m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen, iXLen, <vscale x 32 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen, iXLen, <vscale x 1 x float>, i32, iXLen)
-
-define void @test_f_sf_vc_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen, iXLen, <vscale x 2 x float>, i32, iXLen)
-
-define void @test_f_sf_vc_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xv_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - -define void @test_f_sf_vc_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_xv_se_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - -define void @test_f_sf_vc_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_xv_se_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) - -define @test_f_sf_vc_v_xv_se_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_se_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_se_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_se_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_se_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen, , i16, 
iXLen) - -define @test_f_sf_vc_v_xv_se_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_se_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv1f32.i32.f32.iXLen(iXLen, , i32, iXLen) - -define @test_f_sf_vc_v_xv_se_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv2f32.i32.f32.iXLen(iXLen, , i32, iXLen) - -define @test_f_sf_vc_v_xv_se_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.f32.iXLen(iXLen, , i32, iXLen) - -define @test_f_sf_vc_v_xv_se_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv8f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv8f32.i32.f32.iXLen(iXLen, , i32, iXLen) - -define @test_f_sf_vc_v_xv_se_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv16f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.se.nxv16f32.i32.f32.iXLen(iXLen, , i32, iXLen) - -define @test_f_sf_vc_v_xv_e16mf4( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_e16mf2( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen 3, %vs2, 
i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_e16m1( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) - -define @test_f_sf_vc_v_xv_e16m2( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1i16.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e16m4( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e16m4: +define @test_sf_vc_v_fvv_se_e16mf4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e16m8( %vs2, i16 zeroext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e16m8: +define void @test_sf_vc_fvv_se_e16mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen, , i16, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2i16.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e32mf2( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e32mf2: +define @test_sf_vc_v_fvv_se_e16mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 + %0 = tail call 
@llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.f32.iXLen(iXLen, , i32, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e32m1( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e32m1: +define void @test_sf_vc_fvv_se_e16m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.f32.iXLen(iXLen, , i32, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4i16.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e32m2( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e32m2: +define @test_sf_vc_v_fvv_se_e16m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.f32.iXLen(iXLen, , i32, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e32m4( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e32m4: +define void @test_sf_vc_fvv_se_e16m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.f32.iXLen(iXLen, , i32, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8i16.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_xv_e32m8( %vs2, i32 signext %rs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_xv_e32m8: +define @test_sf_vc_v_fvv_se_e16m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.f32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.f32.iXLen(iXLen, , i32, iXLen) +declare 
@llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e16mf4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf4: +define void @test_sf_vc_fvv_se_e16m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16i16.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e16mf2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf2: +define @test_sf_vc_v_fvv_se_e16m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e16m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e16m1: +define void @test_sf_vc_fvv_se_e16m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32i16.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e16m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e16m2: +define @test_sf_vc_v_fvv_se_e16m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e16m4( %vs2, iXLen %vl) { 
-; CHECK-LABEL: test_f_sf_vc_iv_se_e16m4: +define void @test_sf_vc_fvv_se_e32mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1i32.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e16m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e16m8: +define @test_sf_vc_v_fvv_se_e32mf2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e32mf2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e32mf2: +define void @test_sf_vc_fvv_se_e32m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2i32.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e32m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e32m1: +define @test_sf_vc_v_fvv_se_e32m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e32m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e32m2: +define void @test_sf_vc_fvv_se_e32m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4i32.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e32m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e32m4: +define @test_sf_vc_v_fvv_se_e32m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e32m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e32m8: +define void @test_sf_vc_fvv_se_e32m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8i32.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e64m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e64m1: +define @test_sf_vc_v_fvv_se_e32m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e64m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e64m2: +define void @test_sf_vc_fvv_se_e32m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 ; CHECK-NEXT: ret entry: - tail call void 
@llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16i32.iXLen(iXLen, iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e64m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e64m4: +define @test_sf_vc_v_fvv_se_e32m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, , , iXLen) -define void @test_f_sf_vc_iv_se_e64m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_iv_se_e64m8: +define void @test_sf_vc_fvv_se_e64m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, iXLen 31, %vs2, iXLen 10, iXLen %vl) + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, iXLen, , iXLen, iXLen) - -define @test_f_sf_vc_v_iv_se_e16mf4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1i64.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e16mf2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf2: +define @test_sf_vc_v_fvv_se_e64m1( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e16m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m1: +define void @test_sf_vc_fvv_se_e64m2( %vs2, %vs1, iXLen %vl) 
{ +; CHECK-LABEL: test_sf_vc_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2i64.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e16m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m2: +define @test_sf_vc_v_fvv_se_e64m2( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e16m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m4: +define void @test_sf_vc_fvv_se_e64m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4i64.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e16m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m8: +define @test_sf_vc_v_fvv_se_e64m4( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e32mf2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32mf2: +define void @test_sf_vc_fvv_se_e64m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, 
m8, ta, ma +; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen 3, iXLen 31, %vs2, %vs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8i64.iXLen(iXLen, iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e32m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m1: +define @test_sf_vc_v_fvv_se_e64m8( %vs2, %vs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, , , iXLen) -define @test_f_sf_vc_v_iv_se_e32m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m2: +define void @test_sf_vc_fvx_se_e16mf4( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.nxv1f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_se_e32m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m4: +define @test_sf_vc_v_fvx_se_e16mf4( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv1f16.nxv1f16.i16.iXLen(iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_se_e32m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m8: +define void @test_sf_vc_fvx_se_e16mf2( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 
10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.nxv2f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_se_e64m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m1: +define @test_sf_vc_v_fvx_se_e16mf2( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv2f16.nxv2f16.i16.iXLen(iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_se_e64m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m2: +define void @test_sf_vc_fvx_se_e16m1( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.nxv4f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_se_e64m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m4: +define @test_sf_vc_v_fvx_se_e16m1( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv4f16.nxv4f16.i16.iXLen(iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_se_e64m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m8: +define void @test_sf_vc_fvx_se_e16m2( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret 
void } -declare @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.nxv8f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_e16mf4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf4: +define @test_sf_vc_v_fvx_se_e16m2( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv8f16.nxv8f16.i16.iXLen(iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_e16mf2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf2: +define void @test_sf_vc_fvx_se_e16m4( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.nxv16f16.i16.iXLen(iXLen, iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_e16m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e16m1: +define @test_sf_vc_v_fvx_se_e16m4( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv16f16.nxv16f16.i16.iXLen(iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_e16m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e16m2: +define void @test_sf_vc_fvx_se_e16m8( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen 3, iXLen 31, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.nxv32f16.i16.iXLen(iXLen, iXLen, , i16, 
iXLen) -define @test_f_sf_vc_v_iv_e16m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e16m4: +define @test_sf_vc_v_fvx_se_e16m8( %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen 3, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv32f16.nxv32f16.i16.iXLen(iXLen, , i16, iXLen) -define @test_f_sf_vc_v_iv_e16m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e16m8: +define void @test_sf_vc_fvx_se_e32mf2( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.nxv1f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e32mf2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e32mf2: +define @test_sf_vc_v_fvx_se_e32mf2( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv1f32.nxv1f32.i32.iXLen(iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e32m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e32m1: +define void @test_sf_vc_fvx_se_e32m1( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.nxv2f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e32m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e32m2: +define @test_sf_vc_v_fvx_se_e32m1( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_v_fvx_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv2f32.nxv2f32.i32.iXLen(iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e32m4( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e32m4: +define void @test_sf_vc_fvx_se_e32m2( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.nxv4f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e32m8( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e32m8: +define @test_sf_vc_v_fvx_se_e32m2( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen 3, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xv.se.nxv4f32.nxv4f32.i32.iXLen(iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e64m1( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e64m1: +define void @test_sf_vc_fvx_se_e32m4( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen 3, iXLen 31, %vs2, i32 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen, , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.nxv8f32.i32.iXLen(iXLen, iXLen, , i32, iXLen) -define @test_f_sf_vc_v_iv_e64m2( %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_iv_e64m2: +define @test_sf_vc_v_fvx_se_e32m4( %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; 
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
-  ret <vscale x 2 x double> %0
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
 }

-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)

-define <vscale x 4 x double> @test_f_sf_vc_v_iv_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_iv_e64m4:
+define void @test_sf_vc_fvx_se_e32m8(<vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvx_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.xv 3, 31, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
-  ret <vscale x 4 x double> %0
+  tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.nxv16f32.i32.iXLen(iXLen, iXLen, <vscale x 16 x float>, i32, iXLen)

-define <vscale x 8 x double> @test_f_sf_vc_v_iv_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_iv_e64m8:
+define <vscale x 16 x float> @test_sf_vc_v_fvx_se_e32m8(<vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvx_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
-  ret <vscale x 8 x double> %0
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
 }

-declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.nxv16f32.i32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)

-define void @test_f_sf_vc_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf4:
+define void @test_sf_vc_fvi_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf2:
+define <vscale x 1 x half> @test_sf_vc_v_fvi_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 1 x half> %0
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, half, iXLen)
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e16m1:
+define void @test_sf_vc_fvi_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e16m2:
+define <vscale x 2 x half> @test_sf_vc_v_fvi_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 2 x half> %0
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, half, iXLen)
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e16m4:
+define void @test_sf_vc_fvi_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e16m8:
+define <vscale x 4 x half> @test_sf_vc_v_fvi_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 4 x half> %0
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, half, iXLen)
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e32mf2:
+define void @test_sf_vc_fvi_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e32m1:
+define <vscale x 8 x half> @test_sf_vc_v_fvi_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 8 x half> %0
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, float, iXLen)
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e32m2:
+define void @test_sf_vc_fvi_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e32m4:
+define <vscale x 16 x half> @test_sf_vc_v_fvi_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 16 x half> %0
 }
-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, float, iXLen)
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e32m8:
+define void @test_sf_vc_fvi_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e64m1:
+define <vscale x 32 x half> @test_sf_vc_v_fvi_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 32 x half> %0
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.f64.iXLen(iXLen, iXLen, <vscale x 1 x double>, double, iXLen)
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.nxv32f16.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e64m2:
+define void @test_sf_vc_fvi_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.f64.iXLen(iXLen, iXLen, <vscale x 2 x double>, double, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x float>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e64m4:
+define <vscale x 1 x float> @test_sf_vc_v_fvi_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
-  ret void
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 1 x float> %0
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.f64.iXLen(iXLen, iXLen, <vscale x 4 x double>, double, iXLen)
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)

-define void @test_f_sf_vc_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_fv_se_e64m8:
+define void @test_sf_vc_fvi_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, iXLen 8, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.f64.iXLen(iXLen, iXLen, <vscale x 8 x double>, double, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x float>, iXLen, iXLen)

-define <vscale x 1 x half> @test_f_sf_vc_v_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf4:
+define <vscale x 2 x float> @test_sf_vc_v_fvi_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 1 x half> %0
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 2 x float> %0
 }

-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)

-define <vscale x 2 x half> @test_f_sf_vc_v_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf2:
+define void @test_sf_vc_fvi_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 2 x half> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }

-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x float>, iXLen, iXLen)

-define <vscale x 4 x half> @test_f_sf_vc_v_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m1:
+define <vscale x 4 x float> @test_sf_vc_v_fvi_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 4 x half> %0
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 4 x float> %0
 }

-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
-define <vscale x 8 x half> @test_f_sf_vc_v_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m2:
+define void @test_sf_vc_fvi_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 8 x half> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }

-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x float>, iXLen, iXLen)

-define <vscale x 16 x half> @test_f_sf_vc_v_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m4:
+define <vscale x 8 x float> @test_sf_vc_v_fvi_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 16 x half> %0
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 8 x float> %0
 }

-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)

-define <vscale x 32 x half> @test_f_sf_vc_v_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m8:
+define void @test_sf_vc_fvi_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvi_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 32 x half> %0
+  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, iXLen 8, iXLen %vl)
+  ret void
 }

-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x float>, iXLen, iXLen)

-define <vscale x 1 x float> @test_f_sf_vc_v_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32mf2:
+define <vscale x 16 x float> @test_sf_vc_v_fvi_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvi_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 1 x float> %0
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 8, iXLen %vl)
+  ret <vscale x 16 x float> %0
 }

-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)

-define <vscale x 2 x float> @test_f_sf_vc_v_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m1:
+define void @test_sf_vc_fvf_se_e16mf4(<vscale x 1 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 2 x float> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.nxv1f16.f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, half, iXLen)

-define <vscale x 4 x float> @test_f_sf_vc_v_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m2:
+define <vscale x 1 x half> @test_sf_vc_v_fvf_se_e16mf4(<vscale x 1 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 4 x float> %0
+  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen 1, <vscale x 1 x half> %vs2, half %rs1, iXLen %vl)
+  ret <vscale x 1 x half> %0
 }

-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.nxv1f16.iXLen.f16(iXLen, <vscale x 1 x half>, half, iXLen)

-define <vscale x 8 x float> @test_f_sf_vc_v_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m4:
+define void @test_sf_vc_fvf_se_e16mf2(<vscale x 2 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 8 x float> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.nxv2f16.f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, half, iXLen)

-define <vscale x 16 x float> @test_f_sf_vc_v_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m8:
+define <vscale x 2 x half> @test_sf_vc_v_fvf_se_e16mf2(<vscale x 2 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 16 x float> %0
+  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen 1, <vscale x 2 x half> %vs2, half %rs1, iXLen %vl)
+  ret <vscale x 2 x half> %0
 }

-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.nxv2f16.iXLen.f16(iXLen, <vscale x 2 x half>, half, iXLen)

-define <vscale x 1 x double> @test_f_sf_vc_v_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m1:
+define void @test_sf_vc_fvf_se_e16m1(<vscale x 4 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 1 x double> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.nxv4f16.f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, half, iXLen)

-define <vscale x 2 x double> @test_f_sf_vc_v_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m2:
+define <vscale x 4 x half> @test_sf_vc_v_fvf_se_e16m1(<vscale x 4 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 2 x double> %0
+  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen 1, <vscale x 4 x half> %vs2, half %rs1, iXLen %vl)
+  ret <vscale x 4 x half> %0
 }

-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.nxv4f16.iXLen.f16(iXLen, <vscale x 4 x half>, half, iXLen)

-define <vscale x 4 x double> @test_f_sf_vc_v_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m4:
+define void @test_sf_vc_fvf_se_e16m2(<vscale x 8 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 4 x double> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.nxv8f16.f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, half, iXLen)

-define <vscale x 8 x double> @test_f_sf_vc_v_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m8:
+define <vscale x 8 x half> @test_sf_vc_v_fvf_se_e16m2(<vscale x 8 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 8 x double> %0
+  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen 1, <vscale x 8 x half> %vs2, half %rs1, iXLen %vl)
+  ret <vscale x 8 x half> %0
 }

-declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.nxv8f16.iXLen.f16(iXLen, <vscale x 8 x half>, half, iXLen)

-define <vscale x 1 x half> @test_f_sf_vc_v_fv_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf4:
+define void @test_sf_vc_fvf_se_e16m4(<vscale x 16 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 1 x half> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.nxv16f16.f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, half, iXLen)

-define <vscale x 2 x half> @test_f_sf_vc_v_fv_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf2:
+define <vscale x 16 x half> @test_sf_vc_v_fvf_se_e16m4(<vscale x 16 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 2 x half> %0
+  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen 1, <vscale x 16 x half> %vs2, half %rs1, iXLen %vl)
+  ret <vscale x 16 x half> %0
 }

-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.nxv16f16.iXLen.f16(iXLen, <vscale x 16 x half>, half, iXLen)

-define <vscale x 4 x half> @test_f_sf_vc_v_fv_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e16m1:
+define void @test_sf_vc_fvf_se_e16m8(<vscale x 32 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 4 x half> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x half> %vs2, half %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.nxv32f16.f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, half, iXLen)

-define <vscale x 8 x half> @test_f_sf_vc_v_fv_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e16m2:
+define <vscale x 32 x half> @test_sf_vc_v_fvf_se_e16m8(<vscale x 32 x half> %vs2, half %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 8 x half> %0
+  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen 1, <vscale x 32 x half> %vs2, half %rs1, iXLen %vl)
+  ret <vscale x 32 x half> %0
 }

-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.nxv32f16.iXLen.f16(iXLen, <vscale x 32 x half>, half, iXLen)

-define <vscale x 16 x half> @test_f_sf_vc_v_fv_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e16m4:
+define void @test_sf_vc_fvf_se_e32mf2(<vscale x 1 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 16 x half> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.nxv1f32.f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, float, iXLen)

-define <vscale x 32 x half> @test_f_sf_vc_v_fv_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e16m8:
+define <vscale x 1 x float> @test_sf_vc_v_fvf_se_e32mf2(<vscale x 1 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 32 x half> %0
+  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen 1, <vscale x 1 x float> %vs2, float %rs1, iXLen %vl)
+  ret <vscale x 1 x float> %0
 }

-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.nxv1f32.iXLen.f32(iXLen, <vscale x 1 x float>, float, iXLen)

-define <vscale x 1 x float> @test_f_sf_vc_v_fv_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e32mf2:
+define void @test_sf_vc_fvf_se_e32m1(<vscale x 2 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 1 x float> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.nxv2f32.f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, float, iXLen)

-define <vscale x 2 x float> @test_f_sf_vc_v_fv_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e32m1:
+define <vscale x 2 x float> @test_sf_vc_v_fvf_se_e32m1(<vscale x 2 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
+  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen 1, <vscale x 2 x float> %vs2, float %rs1, iXLen %vl)
   ret <vscale x 2 x float> %0
 }

-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.nxv2f32.iXLen.f32(iXLen, <vscale x 2 x float>, float, iXLen)

-define <vscale x 4 x float> @test_f_sf_vc_v_fv_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e32m2:
+define void @test_sf_vc_fvf_se_e32m2(<vscale x 4 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 4 x float> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.nxv4f32.f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, float, iXLen)

-define <vscale x 8 x float> @test_f_sf_vc_v_fv_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e32m4:
+define <vscale x 4 x float> @test_sf_vc_v_fvf_se_e32m2(<vscale x 4 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 8 x float> %0
+  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen 1, <vscale x 4 x float> %vs2, float %rs1, iXLen %vl)
+  ret <vscale x 4 x float> %0
 }

-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.nxv4f32.iXLen.f32(iXLen, <vscale x 4 x float>, float, iXLen)

-define <vscale x 16 x float> @test_f_sf_vc_v_fv_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e32m8:
+define void @test_sf_vc_fvf_se_e32m4(<vscale x 8 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 16 x float> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.nxv8f32.f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, float, iXLen)

-define <vscale x 1 x double> @test_f_sf_vc_v_fv_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e64m1:
+define <vscale x 8 x float> @test_sf_vc_v_fvf_se_e32m4(<vscale x 8 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 1 x double> %0
+  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen 1, <vscale x 8 x float> %vs2, float %rs1, iXLen %vl)
+  ret <vscale x 8 x float> %0
 }

-declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.nxv8f32.iXLen.f32(iXLen, <vscale x 8 x float>, float, iXLen)

-define <vscale x 2 x double> @test_f_sf_vc_v_fv_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e64m2:
+define void @test_sf_vc_fvf_se_e32m8(<vscale x 16 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvf_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 2 x double> %0
+  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x float> %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
+declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.nxv16f32.f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, float, iXLen)

-define <vscale x 4 x double> @test_f_sf_vc_v_fv_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e64m4:
+define <vscale x 16 x float> @test_sf_vc_v_fvf_se_e32m8(<vscale x 16 x float> %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_v_fvf_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 4 x double> %0
+  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen 1, <vscale x 16 x float> %vs2, float %rs1, iXLen %vl)
+  ret <vscale x 16 x float> %0
 }

-declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
-
-define <vscale x 8 x double> @test_f_sf_vc_v_fv_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fv_e64m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 8 x double> %0
-}
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.nxv16f32.iXLen.f32(iXLen, <vscale x 16 x float>, float, iXLen)

-declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
index ef2eee9e438e9..95fb51e3fa5b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvv.ll
@@ -2434,2780 +2434,1257 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)

-define void @test_sf_vc_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
+define void @test_sf_vc_fvvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
-
-define void @test_sf_vc_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
-
-define void @test_sf_vc_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
-
-define void @test_sf_vc_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
-
-define void @test_sf_vc_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
-
-define void @test_sf_vc_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
-
-define void @test_sf_vc_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
-
-define void @test_sf_vc_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
-
-define void @test_sf_vc_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
-
-define void @test_sf_vc_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
-
-define void @test_sf_vc_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e32m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
-
-define void @test_sf_vc_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e64m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
-
-define void @test_sf_vc_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e64m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
-
-define void @test_sf_vc_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e64m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
-
-define void @test_sf_vc_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvv_se_e64m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
-
-define <vscale x 1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 1 x i16> %0
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
-
-define <vscale x 2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 2 x i16> %0
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
-
-define <vscale x 4 x i16> @test_sf_vc_v_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 4 x i16> %0
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
-
-define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 8 x i16> %0
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
-
-define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 16 x i16> %0
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
-
-define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 32 x i16> %0
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
-
-define <vscale x 1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 1 x i32> %0
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
-
-define <vscale x 2 x i32> @test_sf_vc_v_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 2 x i32> %0
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
-
-define <vscale x 4 x i32> @test_sf_vc_v_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 4 x i32> %0
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
-
-define <vscale x 8 x i32> @test_sf_vc_v_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 8 x i32> %0
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
-
-define <vscale x 16 x i32> @test_sf_vc_v_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 16 x i32> %0
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
-
-define <vscale x 1 x i64> @test_sf_vc_v_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 1 x i64> %0
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
-
-define <vscale x 2 x i64> @test_sf_vc_v_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 2 x i64> %0
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
-
-define <vscale x 4 x i64> @test_sf_vc_v_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 4 x i64> %0
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
-
-define <vscale x 8 x i64> @test_sf_vc_v_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 8 x i64> %0
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
-
-define <vscale x 1 x i16> @test_sf_vc_v_fvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 1 x i16> %0
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
-
-define <vscale x 2 x i16> @test_sf_vc_v_fvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 2 x i16> %0
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
-
-define <vscale x 4 x i16> @test_sf_vc_v_fvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 4 x i16> %0
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
-
-define <vscale x 8 x i16> @test_sf_vc_v_fvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 8 x i16> %0
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
-
-define <vscale x 16 x i16> @test_sf_vc_v_fvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 16 x i16> %0
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
-
-define <vscale x 32 x i16> @test_sf_vc_v_fvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
-  ret <vscale x 32 x i16> %0
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
-
-define <vscale x 1 x i32> @test_sf_vc_v_fvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 1 x i32> %0
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
-
-define <vscale x 2 x i32> @test_sf_vc_v_fvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 2 x i32> %0
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
-
-define <vscale x 4 x i32> @test_sf_vc_v_fvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 4 x i32> %0
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
-
-define <vscale x 8 x i32> @test_sf_vc_v_fvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 8 x i32> %0
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
-
-define <vscale x 16 x i32> @test_sf_vc_v_fvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e32m8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
-  ret <vscale x 16 x i32> %0
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
-
-define <vscale x 1 x i64> @test_sf_vc_v_fvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e64m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
-  ret <vscale x 1 x i64> %0
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
-
-define <vscale x 2 x i64> @test_sf_vc_v_fvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvv_e64m2:
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) - -define @test_sf_vc_v_fvv_e64m4( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_e64m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) - -define @test_sf_vc_v_fvv_e64m8( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_sf_vc_v_fvv_e64m8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, , , double, iXLen) - -define void @test_f_sf_vc_vvv_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, , , , iXLen) - -define void @test_f_sf_vc_vvv_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, , , , iXLen) - -define void @test_f_sf_vc_vvv_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, , , , iXLen) - -define void @test_f_sf_vc_vvv_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m2: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) - ret void -} - -declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, , , , iXLen) - -define void @test_f_sf_vc_vvv_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m4: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16 -; CHECK-NEXT: ret -entry: - tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) - ret void -} - 
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e32mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
-
-define void @test_f_sf_vc_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
-
-define <vscale x 1 x half> @test_f_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
- ret <vscale x 1 x half> %0
-}
-
-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
-
-define <vscale x 2 x half> @test_f_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
- ret <vscale x 2 x half> %0
-}
-
-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
-
-define <vscale x 4 x half> @test_f_sf_vc_v_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
- ret <vscale x 4 x half> %0
-}
-
-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
-
-define <vscale x 8 x half> @test_f_sf_vc_v_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
- ret <vscale x 8 x half> %0
-}
-
-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
-
-define <vscale x 16 x half> @test_f_sf_vc_v_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
- ret <vscale x 16 x half> %0
-}
-
-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
-
-define <vscale x 32 x half> @test_f_sf_vc_v_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
- ret <vscale x 32 x half> %0
-}
-
-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
-
-define <vscale x 1 x float> @test_f_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
- ret <vscale x 1 x float> %0
-}
-
-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
-
-define <vscale x 2 x float> @test_f_sf_vc_v_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
- ret <vscale x 2 x float> %0
-}
-
-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
-
-define <vscale x 4 x float> @test_f_sf_vc_v_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
- ret <vscale x 4 x float> %0
-}
-
-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
-
-define <vscale x 8 x float> @test_f_sf_vc_v_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
- ret <vscale x 8 x float> %0
-}
-
-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
-
-define <vscale x 16 x float> @test_f_sf_vc_v_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
- ret <vscale x 16 x float> %0
-}
-
-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
-
-define <vscale x 1 x double> @test_f_sf_vc_v_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
- ret <vscale x 1 x double> %0
-}
-
-declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
-
-define <vscale x 2 x double> @test_f_sf_vc_v_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
- ret <vscale x 2 x double> %0
-}
-
-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
-
-define <vscale x 4 x double> @test_f_sf_vc_v_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
- ret <vscale x 4 x double> %0
-}
-
-declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
-
-define <vscale x 8 x double> @test_f_sf_vc_v_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
- ret <vscale x 8 x double> %0
-}
-
-declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
-
-define <vscale x 1 x half> @test_f_sf_vc_v_vvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
- ret <vscale x 1 x half> %0
-}
-
-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
-
-define <vscale x 2 x half> @test_f_sf_vc_v_vvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
- ret <vscale x 2 x half> %0
-}
-
-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
-
-define <vscale x 4 x half> @test_f_sf_vc_v_vvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
- ret <vscale x 4 x half> %0
-}
-
-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
-
-define <vscale x 8 x half> @test_f_sf_vc_v_vvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
- ret <vscale x 8 x half> %0
-}
-
-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
-
-define <vscale x 16 x half> @test_f_sf_vc_v_vvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
- ret <vscale x 16 x half> %0
-}
-
-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
-
-define <vscale x 32 x half> @test_f_sf_vc_v_vvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
- ret <vscale x 32 x half> %0
-}
-
-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
-
-define <vscale x 1 x float> @test_f_sf_vc_v_vvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e32mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
- ret <vscale x 1 x float> %0
-}
-
-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
-
-define <vscale x 2 x float> @test_f_sf_vc_v_vvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
- ret <vscale x 2 x float> %0
-}
-
-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
-
-define <vscale x 4 x float> @test_f_sf_vc_v_vvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
- ret <vscale x 4 x float> %0
-}
-
-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
-
-define <vscale x 8 x float> @test_f_sf_vc_v_vvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
- ret <vscale x 8 x float> %0
-}
-
-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
-
-define <vscale x 16 x float> @test_f_sf_vc_v_vvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
- ret <vscale x 16 x float> %0
-}
-
-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
-
-define <vscale x 1 x double> @test_f_sf_vc_v_vvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
- ret <vscale x 1 x double> %0
-}
-
-declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
-
-define <vscale x 2 x double> @test_f_sf_vc_v_vvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
- ret <vscale x 2 x double> %0
-}
-
-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
-
-define <vscale x 4 x double> @test_f_sf_vc_v_vvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
- ret <vscale x 4 x double> %0
-}
-
-declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
-
-define <vscale x 8 x double> @test_f_sf_vc_v_vvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
- ret <vscale x 8 x double> %0
-}
-
-declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e32mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
-
-define void @test_f_sf_vc_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
-; CHECK-NEXT: ret
-entry:
- tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
- ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
-
-define <vscale x 1 x half> @test_f_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 1 x half> %0
-}
-
-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
-
-define <vscale x 2 x half> @test_f_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 2 x half> %0
-}
-
-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
-
-define <vscale x 4 x half> @test_f_sf_vc_v_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 4 x half> %0
-}
-
-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
-
-define <vscale x 8 x half> @test_f_sf_vc_v_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 8 x half> %0
-}
-
-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
-
-define <vscale x 16 x half> @test_f_sf_vc_v_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 16 x half> %0
-}
-
-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
-
-define <vscale x 32 x half> @test_f_sf_vc_v_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 32 x half> %0
-}
-
-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
-
-define <vscale x 1 x float> @test_f_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 1 x float> %0
-}
-
-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
-
-define <vscale x 2 x float> @test_f_sf_vc_v_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 2 x float> %0
-}
-
-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
-
-define <vscale x 4 x float> @test_f_sf_vc_v_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 4 x float> %0
-}
-
-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
-
-define <vscale x 8 x float> @test_f_sf_vc_v_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 8 x float> %0
-}
-
-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
-
-define <vscale x 16 x float> @test_f_sf_vc_v_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 16 x float> %0
-}
-
-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
-
-define <vscale x 1 x half> @test_f_sf_vc_v_xvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 1 x half> %0
-}
-
-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
-
-define <vscale x 2 x half> @test_f_sf_vc_v_xvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 2 x half> %0
-}
-
-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
-
-define <vscale x 4 x half> @test_f_sf_vc_v_xvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 4 x half> %0
-}
-
-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
-
-define <vscale x 8 x half> @test_f_sf_vc_v_xvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 8 x half> %0
-}
-
-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
-
-define <vscale x 16 x half> @test_f_sf_vc_v_xvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 16 x half> %0
-}
-
-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
-
-define <vscale x 32 x half> @test_f_sf_vc_v_xvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
- ret <vscale x 32 x half> %0
-}
-
-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
-
-define <vscale x 1 x float> @test_f_sf_vc_v_xvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e32mf2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 1 x float> %0
-}
-
-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
-
-define <vscale x 2 x float> @test_f_sf_vc_v_xvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 2 x float> %0
-}
-
-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
-
-define <vscale x 4 x float> @test_f_sf_vc_v_xvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 4 x float> %0
-}
-
-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
-
-define <vscale x 8 x float> @test_f_sf_vc_v_xvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 8 x float> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
 
-define <vscale x 16 x float> @test_f_sf_vc_v_xvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m8:
+define <vscale x 1 x half> @test_sf_vc_fv_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
- ret <vscale x 16 x float> %0
+ %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
+ ret <vscale x 1 x half> %0
 }
 
-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
+declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf4:
+define void @test_sf_vc_fvvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf2:
+define <vscale x 2 x half> @test_sf_vc_fv_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
+ ret <vscale x 2 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m1:
+define void @test_sf_vc_fvvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m2:
+define <vscale x 4 x half> @test_sf_vc_fv_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
+ ret <vscale x 4 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m4:
+define void @test_sf_vc_fvvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m8:
+define <vscale x 8 x half> @test_sf_vc_fv_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
+ ret <vscale x 8 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e32mf2:
+define void @test_sf_vc_fvvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m1:
+define <vscale x 16 x half> @test_sf_vc_fv_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
+ ret <vscale x 16 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m2:
+define void @test_sf_vc_fvvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m4:
+define <vscale x 32 x half> @test_sf_vc_fv_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
+ ret <vscale x 32 x half> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m8:
+define void @test_sf_vc_fvvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m1:
+define <vscale x 1 x float> @test_sf_vc_fv_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
+ ret <vscale x 1 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m2:
+define void @test_sf_vc_fvvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m4:
+define <vscale x 2 x float> @test_sf_vc_fv_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
- ret void
+ %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
+ ret <vscale x 2 x float> %0
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
+declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
 
-define void @test_f_sf_vc_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m8:
+define void @test_sf_vc_fvvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
 ; CHECK-NEXT: ret
 entry:
- tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
 ret void
 }
 
-declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
-
-define <vscale x 1 x half> @test_f_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 1 x half> %0
-}
-
-declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
 
-define <vscale x 2 x half> @test_f_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf2:
+define <vscale x 4 x float> @test_sf_vc_fv_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 2 x half> %0
+ %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
+ ret <vscale x 4 x float> %0
 }
 
-declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
+declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
 
-define <vscale x 4 x half> @test_f_sf_vc_v_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m1:
+define void @test_sf_vc_fvvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 4 x half> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
 
-define <vscale x 8 x half> @test_f_sf_vc_v_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m2:
+define <vscale x 8 x float> @test_sf_vc_fv_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 8 x half> %0
+ %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
+ ret <vscale x 8 x float> %0
 }
 
-declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
+declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
 
-define <vscale x 16 x half> @test_f_sf_vc_v_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m4:
+define void @test_sf_vc_fvvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 16 x half> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
 
-define <vscale x 32 x half> @test_f_sf_vc_v_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m8:
+define <vscale x 16 x float> @test_sf_vc_fv_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 32 x half> %0
+ %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
+ ret <vscale x 16 x float> %0
 }
 
-declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
+declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
 
-define <vscale x 1 x float> @test_f_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32mf2:
+define void @test_sf_vc_fvvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 1 x float> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
 
-define <vscale x 2 x float> @test_f_sf_vc_v_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m1:
+define <vscale x 1 x double> @test_sf_vc_fv_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 2 x float> %0
+ %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
+ ret <vscale x 1 x double> %0
 }
 
-declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
+declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
 
-define <vscale x 4 x float> @test_f_sf_vc_v_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m2:
+define void @test_sf_vc_fvvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 4 x float> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
 
-define <vscale x 8 x float> @test_f_sf_vc_v_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m4:
+define <vscale x 2 x double> @test_sf_vc_fv_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 8 x float> %0
+ %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
+ ret <vscale x 2 x double> %0
 }
 
-declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
+declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
 
-define <vscale x 16 x float> @test_f_sf_vc_v_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m8:
+define void @test_sf_vc_fvvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 16 x float> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
 
-define <vscale x 1 x double> @test_f_sf_vc_v_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m1:
+define <vscale x 4 x double> @test_sf_vc_fv_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 1 x double> %0
+ %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
+ ret <vscale x 4 x double> %0
 }
 
-declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
+declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
 
-define <vscale x 2 x double> @test_f_sf_vc_v_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m2:
+define void @test_sf_vc_fvvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 2 x double> %0
+ tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
+ ret void
 }
 
-declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
 
-define <vscale x 4 x double> @test_f_sf_vc_v_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m4:
+define <vscale x 8 x double> @test_sf_vc_fv_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
- ret <vscale x 4 x double> %0
+ %0 = tail call @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, , , , iXLen) -define @test_f_sf_vc_v_ivv_se_e64m8( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m8: +define void @test_sf_vc_fvvx_se_e16mf4( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e16mf4( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf4: +define @test_sf_vc_v_fvvx_se_e16mf4( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e16mf2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf2: +define void @test_sf_vc_fvvx_se_e16mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e16m1( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m1: +define @test_sf_vc_v_fvvx_se_e16mf2( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call 
@llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e16m2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m2: +define void @test_sf_vc_fvvx_se_e16m1( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e16m4( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m4: +define @test_sf_vc_v_fvvx_se_e16m1( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e16m8( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m8: +define void @test_sf_vc_fvvx_se_e16m2( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e32mf2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e32mf2: +define @test_sf_vc_v_fvvx_se_e16m2( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call 
@llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e32m1( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m1: +define void @test_sf_vc_fvvx_se_e16m4( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e32m2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m2: +define @test_sf_vc_v_fvvx_se_e16m4( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e32m4( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m4: +define void @test_sf_vc_fvvx_se_e16m8( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e32m8( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m8: +define @test_sf_vc_v_fvvx_se_e16m8( %vd, %vs2, i16 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call 
@llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, , , i16, iXLen) -define @test_f_sf_vc_v_ivv_e64m1( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m1: +define void @test_sf_vc_fvvx_se_e32mf2( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) -define @test_f_sf_vc_v_ivv_e64m2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m2: +define @test_sf_vc_v_fvvx_se_e32mf2( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen) -define @test_f_sf_vc_v_ivv_e64m4( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m4: +define void @test_sf_vc_fvvx_se_e32m1( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) -define @test_f_sf_vc_v_ivv_e64m8( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m8: +define @test_sf_vc_v_fvvx_se_e32m1( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call 
@llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf4: +define void @test_sf_vc_fvvx_se_e32m2( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf2: +define @test_sf_vc_v_fvvx_se_e32m2( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m1: +define void @test_sf_vc_fvvx_se_e32m4( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m2: +define @test_sf_vc_v_fvvx_se_e32m4( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call 
@llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m4: +define void @test_sf_vc_fvvx_se_e32m8( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e16m8( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m8: +define @test_sf_vc_v_fvvx_se_e32m8( %vd, %vs2, i32 %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, , , i32, iXLen) -define void @test_f_sf_vc_fvv_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e32mf2: +define void @test_sf_vc_fvvi_se_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m1: +define @test_sf_vc_fv_fvvi_se_e16mf4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret void + %0 = 
tail call @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m2: +define void @test_sf_vc_fvvi_se_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m4: +define @test_sf_vc_fv_fvvi_se_e16mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e32m8( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m8: +define void @test_sf_vc_fvvi_se_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e64m1( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m1: +define @test_sf_vc_fv_fvvi_se_e16m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret void + %0 = 
tail call @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen, , , double, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e64m2( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m2: +define void @test_sf_vc_fvvi_se_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen, , , double, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e64m4( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m4: +define @test_sf_vc_fv_fvvi_se_e16m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen, , , double, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvv_se_e64m8( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m8: +define void @test_sf_vc_fvvi_se_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen, , , double, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf4: +define @test_sf_vc_fv_fvvi_se_e16m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 
+ %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf2: +define void @test_sf_vc_fvvi_se_e16m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m1: +define @test_sf_vc_fv_fvvi_se_e16m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m2: +define void @test_sf_vc_fvvi_se_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m4: +define @test_sf_vc_fv_fvvi_se_e32mf2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, 
iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e16m8( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m8: +define void @test_sf_vc_fvvi_se_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32mf2: +define @test_sf_vc_fv_fvvi_se_e32m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m1: +define void @test_sf_vc_fvvi_se_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m2: +define @test_sf_vc_fv_fvvi_se_e32m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + %0 = tail call 
@llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m4: +define void @test_sf_vc_fvvi_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e32m8( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m8: +define @test_sf_vc_fv_fvvi_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e64m1( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m1: +define void @test_sf_vc_fvvi_se_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen, , , double, iXLen) +declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e64m2( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m2: +define @test_sf_vc_fv_fvvi_se_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 + %0 = tail call 
@llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen, , , double, iXLen) +declare @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_fvv_se_e64m4( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m4: +define void @test_sf_vc_fvvf_se_e16mf4( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen, , , double, iXLen) +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvv_se_e64m8( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m8: +define @test_sf_vc_fv_fvvf_se_e16mf4( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen, , , double, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half %rs1, iXLen) -define @test_f_sf_vc_v_fvv_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf4: +define void @test_sf_vc_fvvf_se_e16mf2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvv_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf2: +define @test_sf_vc_fv_fvvf_se_e16mf2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + %0 = tail call 
@llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half %rs1, iXLen) + +define void @test_sf_vc_fvvf_se_e16m1( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvv_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m1: +define @test_sf_vc_fv_fvvf_se_e16m1( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half %rs1, iXLen) + +define void @test_sf_vc_fvvf_se_e16m2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvv_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m2: +define @test_sf_vc_fv_fvvf_se_e16m2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half %rs1, iXLen) + +define void @test_sf_vc_fvvf_se_e16m4( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvv_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m4: +define @test_sf_vc_fv_fvvf_se_e16m4( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: 
test_sf_vc_fv_fvvf_se_e16m4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half %rs1, iXLen) + +define void @test_sf_vc_fvvf_se_e16m8( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvv_e16m8( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m8: +define @test_sf_vc_fv_fvvf_se_e16m8( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, , , half %rs1, iXLen) + +define void @test_sf_vc_fvvf_se_e32mf2( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void +} + +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvv_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e32mf2: +define @test_sf_vc_fv_fvvf_se_e32mf2( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float %rs1, iXLen) -define @test_f_sf_vc_v_fvv_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m1: +define void @test_sf_vc_fvvf_se_e32m1( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: sf.vc.fvv 
1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvv_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m2: +define @test_sf_vc_fv_fvvf_se_e32m1( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float %rs1, iXLen) -define @test_f_sf_vc_v_fvv_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m4: +define void @test_sf_vc_fvvf_se_e32m2( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvv_e32m8( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m8: +define @test_sf_vc_fv_fvvf_se_e32m2( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float %rs1, iXLen) -define @test_f_sf_vc_v_fvv_e64m1( %vd, %vs2, double %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m1: +define void @test_sf_vc_fvvf_se_e32m4( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v12, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen, , , double, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float, iXLen)

-define @test_f_sf_vc_v_fvv_e64m2( %vd, %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m2:
+define @test_sf_vc_fv_fvvf_se_e32m4( %vd, %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v10, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen, , , double, iXLen)
+declare @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float %rs1, iXLen)

-define @test_f_sf_vc_v_fvv_e64m4( %vd, %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m4:
+define void @test_sf_vc_fvvf_se_e32m8( %vd, %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v12, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vc.fvv 1, v8, v16, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen, , , double, iXLen)
+declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, , , float, iXLen)

-define @test_f_sf_vc_v_fvv_e64m8( %vd, %vs2, double %fs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m8:
+define @test_sf_vc_fv_fvvf_se_e32m8( %vd, %vs2, float %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    sf.vc.v.fvv 1, v8, v16, fa0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen 1, %vd, %vs2, double %fs1, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen, , , double, iXLen)
+declare @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, , , float %rs1, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
index 35754aa022686..4c0833b2ff0cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xsfvcp-xvw.ll
@@ -1759,1757 +1759,939 @@ entry:
 declare @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen)

-define void @test_sf_vc_fvw_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4:
+define void @test_sf_vc_fwvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define void @test_sf_vc_fvw_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define void @test_sf_vc_fvw_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen, , , half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen)

-define void @test_sf_vc_fvw_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16m2:
+define @test_sf_vc_fw_fwvvv_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen, , , half, iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, , , , iXLen)

-define void @test_sf_vc_fvw_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e16m4:
+define void @test_sf_vc_fwvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen, , , half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen)

-define void @test_sf_vc_fvw_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2:
+define @test_sf_vc_fw_fwvvv_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v9, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen, , , float, iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, , , , iXLen)

-define void @test_sf_vc_fvw_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32m1:
+define void @test_sf_vc_fwvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen, , , float, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen)

-define void @test_sf_vc_fvw_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32m2:
+define @test_sf_vc_fw_fwvvv_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen, , , float, iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, , , , iXLen)

-define void @test_sf_vc_fvw_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_fvw_se_e32m4:
+define void @test_sf_vc_fwvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, , , half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen)

-define @test_sf_vc_v_fvw_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2:
+define @test_sf_vc_fw_fwvvv_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_e16m1( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, , , half, iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, , , , iXLen)

-define @test_sf_vc_v_fvw_e16m2( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16m2:
+define void @test_sf_vc_fwvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, , , half, iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen)

-define @test_sf_vc_v_fvw_e16m4( %vd, %vs2, half %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e16m4:
+define @test_sf_vc_fw_fwvvv_se_e32m8( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, , , half, iXLen)
-
-define @test_sf_vc_v_fvw_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v9, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_e32m1( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v10, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_e32m2( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v12, fa0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, , , float, iXLen)
-
-define @test_sf_vc_v_fvw_e32m4( %vd, %vs2, float %fs1, iXLen %vl) {
-; CHECK-LABEL: test_sf_vc_v_fvw_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.fvw 1, v8, v16, fa0
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, , , float, iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf4:
+define void @test_sf_vc_fwvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen, , , , iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf2:
+define @test_sf_vc_fw_fwvvv_se_e64m1( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen, , , , iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m1:
+define void @test_sf_vc_fwvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen, , , , iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m2:
+define @test_sf_vc_fw_fwvvv_se_e64m2( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen, , , , iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m4:
+define void @test_sf_vc_fwvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen, , , , iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e32mf2:
+define @test_sf_vc_fw_fwvvv_se_e64m4( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen, , , , iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m1:
+define void @test_sf_vc_fwvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen, , , , iXLen)
+declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m2:
+define @test_sf_vc_fw_fwvvv_se_e64m8( %vd, %vs2, %vs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen, , , , iXLen)
+declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, , , , iXLen)

-define void @test_f_sf_vc_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m4:
+define void @test_sf_vc_fwvx_se_e32mf2( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen, , , , iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen)

-define @test_f_sf_vc_v_vvw_se_e16mf4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf4:
+define @test_sf_vc_w_fwvx_se_e32mf2( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
   ret %0
 }

-declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, , , , iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, , , i16, iXLen)

-define @test_f_sf_vc_v_vvw_se_e16mf2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf2:
+define void @test_sf_vc_fwvx_se_e32m1( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, , , , iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen)

-define @test_f_sf_vc_v_vvw_se_e16m1( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m1:
+define @test_sf_vc_w_fwvx_se_e32m1( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_se_e16m2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_se_e16m4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_se_e32mf2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_se_e32m1( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_se_e32m2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_se_e32m4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e16mf4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e16mf2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e16m1( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e16m2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e16m4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e32mf2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e32m1( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e32m2( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, , , , iXLen)
-
-define @test_f_sf_vc_v_vvw_e32m4( %vd, %vs2, %vs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, %vd, %vs2, %vs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, , , , iXLen)
-
-define void @test_f_sf_vc_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define void @test_f_sf_vc_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
   ret %0
 }

-declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.iXLen.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.iXLen.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.iXLen.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_se_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.iXLen.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.iXLen.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_e16mf4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_e16mf2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_e16m1( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_e16m2( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_e16m4( %vd, %vs2, i16 zeroext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, , , i16, iXLen)
-
-define @test_f_sf_vc_v_xvw_e32mf2( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e32mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_e32m1( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_e32m2( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define @test_f_sf_vc_v_xvw_e32m4( %vd, %vs2, i32 signext %rs1, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
-  ret %0
-}
-
-declare @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, , , i32, iXLen)
-
-define void @test_f_sf_vc_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf4:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
-
-define void @test_f_sf_vc_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf2:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
-
-define void @test_f_sf_vc_ivw_se_e16m1( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret void
-}
-
-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, , , i16, iXLen)

-define void @test_f_sf_vc_ivw_se_e16m2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m2:
+define void @test_sf_vc_fwvx_se_e32m2( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen)

-define void @test_f_sf_vc_ivw_se_e16m4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m4:
+define @test_sf_vc_w_fwvx_se_e32m2( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, , , i16, iXLen)

-define void @test_f_sf_vc_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e32mf2:
+define void @test_sf_vc_fwvx_se_e32m4( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen)

-define void @test_f_sf_vc_ivw_se_e32m1( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m1:
+define @test_sf_vc_w_fwvx_se_e32m4( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, , , i16, iXLen)

-define void @test_f_sf_vc_ivw_se_e32m2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m2:
+define void @test_sf_vc_fwvx_se_e32m8( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
   ret void
 }

-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen)

-define void @test_f_sf_vc_ivw_se_e32m4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m4:
+define @test_sf_vc_w_fwvx_se_e32m8( %vd, %vs2, i16 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret void
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, %vd, %vs2, i16 %rs1, iXLen %vl)
+  ret %0
 }

-declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, , , i16, iXLen)

-define @test_f_sf_vc_v_ivw_se_e16mf4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf4:
+define void @test_sf_vc_fwvx_se_e64m1( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e16mf2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf2:
+define @test_sf_vc_w_fwvx_se_e64m1( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e16m1( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m1:
+define void @test_sf_vc_fwvx_se_e64m2( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e16m2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m2:
+define @test_sf_vc_w_fwvx_se_e64m2( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e16m4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m4:
+define void @test_sf_vc_fwvx_se_e64m4( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e32mf2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32mf2:
+define @test_sf_vc_w_fwvx_se_e64m4( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e32m1( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m1:
+define void @test_sf_vc_fwvx_se_e64m8( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e32m2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m2:
+define @test_sf_vc_w_fwvx_se_e64m8( %vd, %vs2, i32 %rs1, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, %vd, %vs2, i32 %rs1, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, , , i32, iXLen)

-define @test_f_sf_vc_v_ivw_se_e32m4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m4:
+define void @test_sf_vc_fwvi_se_e32mf2( %vd, %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 3
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen)

-define @test_f_sf_vc_v_ivw_e16mf4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf4:
+define @test_sf_vc_fw_fwvi_se_e32mf2( %vd, %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
+  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl)
   ret %0
 }

-declare @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, , , iXLen, iXLen)

-define @test_f_sf_vc_v_ivw_e16mf2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf2:
+define void @test_sf_vc_fwvi_se_e32m1( %vd, %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 3
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen)

-define @test_f_sf_vc_v_ivw_e16m1( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m1:
+define @test_sf_vc_fw_fwvi_se_e32m1( %vd, %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 3
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl)
+  ret %0
 }

-declare @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, , , iXLen, iXLen)

-define @test_f_sf_vc_v_ivw_e16m2( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m2:
+define void @test_sf_vc_fwvi_se_e32m2( %vd, %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 3
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl)
+  ret void
 }

-declare @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, , , iXLen, iXLen)
+declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen)

-define @test_f_sf_vc_v_ivw_e16m4( %vd, %vs2, iXLen %vl) {
-; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m4:
+define @test_sf_vc_fw_fwvi_se_e32m2( %vd, %vs2, iXLen %vl) {
+; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
-; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 3
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl)
-  ret %0
+  %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl)
+  ret
%0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_ivw_e32mf2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivw_e32mf2: +define void @test_sf_vc_fwvi_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_ivw_e32m1( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m1: +define @test_sf_vc_fw_fwvi_se_e32m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_ivw_e32m2( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m2: +define void @test_sf_vc_fwvi_se_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define @test_f_sf_vc_v_ivw_e32m4( %vd, %vs2, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m4: +define @test_sf_vc_fw_fwvi_se_e32m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma -; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 10, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } 
-declare @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, , , iXLen, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf4: +define void @test_sf_vc_fwvi_se_e64m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf2: +define @test_sf_vc_fw_fwvi_se_e64m1( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m1: +define void @test_sf_vc_fwvi_se_e64m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m2: +define @test_sf_vc_fw_fwvi_se_e64m2( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret void + %0 = tail call 
@llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m4: +define void @test_sf_vc_fwvi_se_e64m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e32mf2: +define @test_sf_vc_fw_fwvi_se_e64m4( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m1: +define void @test_sf_vc_fwvi_se_e64m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m2: +define @test_sf_vc_fw_fwvi_se_e64m8( %vd, %vs2, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 3 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.f32.iXLen(iXLen 1, %vd, 
%vs2, float %fs1, iXLen %vl) - ret void + %0 = tail call @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, %vd, %vs2, iXLen 3, iXLen %vl) + ret %0 } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, , , iXLen, iXLen) -define void @test_f_sf_vc_fvw_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m4: +define void @test_sf_vc_fwvf_se_e32mf2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma -; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret void } -declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf4: +define @test_sf_vc_fw_fwvf_se_e32mf2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf2: +define void @test_sf_vc_fwvf_se_e32m1( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m1: +define @test_sf_vc_fw_fwvf_se_e32m1( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen 
1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m2: +define void @test_sf_vc_fwvf_se_e32m2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m4: +define @test_sf_vc_fw_fwvf_se_e32m2( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32mf2: +define void @test_sf_vc_fwvf_se_e32m4( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m1: +define @test_sf_vc_fw_fwvf_se_e32m4( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, 
fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m2: +define void @test_sf_vc_fwvf_se_e32m8( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_se_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m4: +define @test_sf_vc_fw_fwvf_se_e32m8( %vd, %vs2, half %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, %vd, %vs2, half %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, , , half, iXLen) -define @test_f_sf_vc_v_fvw_e16mf4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf4: +define void @test_sf_vc_fwvf_se_e64m1( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e16mf2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf2: +define @test_sf_vc_fw_fwvf_se_e64m1( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: 
sf.vc.v.fvw 1, v8, v9, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 -} - -declare @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen, , , half, iXLen) - -define @test_f_sf_vc_v_fvw_e16m1( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 -; CHECK-NEXT: ret -entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e16m2( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m2: +define void @test_sf_vc_fwvf_se_e64m2( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen, , , half, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e16m4( %vd, %vs2, half %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m4: +define @test_sf_vc_fw_fwvf_se_e64m2( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen 1, %vd, %vs2, half %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen, , , half, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e32mf2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e32mf2: +define void @test_sf_vc_fwvf_se_e64m4( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void } -declare 
@llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e32m1( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m1: +define @test_sf_vc_fw_fwvf_se_e64m4( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e32m2( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m2: +define void @test_sf_vc_fwvf_se_e64m8( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma -; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) - ret %0 + tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) + ret void } -declare @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen, , , float, iXLen) +declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) -define @test_f_sf_vc_v_fvw_e32m4( %vd, %vs2, float %fs1, iXLen %vl) { -; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m4: +define @test_sf_vc_fw_fwvf_se_e64m8( %vd, %vs2, float %rs1, iXLen %vl) { +; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0 ; CHECK-NEXT: ret entry: - %0 = tail call @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen 1, %vd, %vs2, float %fs1, iXLen %vl) + %0 = tail call @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, %vd, %vs2, float %rs1, iXLen %vl) ret %0 } -declare @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen, , , float, iXLen) +declare @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, , , float, iXLen) +
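For readers tracking the mangling change exercised by these tests: the overloaded suffix of the sf.vc intrinsics now spells out the vd operand's vector type explicitly instead of leaving it out of the mangled name. A minimal before/after sketch in LLVM IR, distilled from the fvw tests above (the concrete values and virtual registers are illustrative, not part of the patch):

; Old mangling: vd's vector type did not appear in the suffix, so only the
; result and vs2 vector types were spelled out.
  %old = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
; New mangling: result, vd, and vs2 are each mangled independently, so vd may
; carry a different vector type than the result.
  %new = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)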