12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
18 changes: 6 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -302,8 +302,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -574,8 +573,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -801,8 +799,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1073,8 +1070,7 @@ declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1300,8 +1296,7 @@ declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1482,8 +1477,7 @@ declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
21 changes: 7 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -302,8 +302,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -574,8 +573,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -801,8 +799,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -983,8 +980,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1255,8 +1251,7 @@ declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1482,8 +1477,7 @@ declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1664,8 +1658,7 @@ declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
15 changes: 5 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
@@ -527,8 +527,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -709,8 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -981,8 +979,7 @@ declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1163,8 +1160,7 @@ declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1255,8 +1251,7 @@ declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
18 changes: 6 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -527,8 +527,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -709,8 +708,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -801,8 +799,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1073,8 +1070,7 @@ declare <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1255,8 +1251,7 @@ declare <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -1347,8 +1342,7 @@ declare <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
9 changes: 3 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
12 changes: 4 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -295,8 +295,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -561,8 +560,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v24, (a0)
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -783,8 +781,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
Expand Down Expand Up @@ -961,8 +958,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)