36 changes: 18 additions & 18 deletions llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
@@ -6,7 +6,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i32)
define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i32)
define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i32)
define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i32)
define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i
define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i
define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i
define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -97,7 +97,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16,
define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -110,7 +110,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16,
define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -123,7 +123,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16,
define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16,
define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i1
define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i1
define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -175,7 +175,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32,
define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -188,7 +188,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32,
define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32,
define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32,
define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i3
define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
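Why ta,mu becomes tu,mu in these checks: vmv.s.x writes only element 0 of its destination, and the llvm.riscv.vmv.s.x intrinsic carries a passthru operand whose remaining elements must pass through unchanged. Under a tail-agnostic vtype the hardware is free to clobber those tail elements, so the instruction has to execute tail-undisturbed. A minimal IR sketch of the contract (illustrative, not taken from these tests; the function name set_lane0 is hypothetical):

; lanes 1..vlmax-1 of the result must equal %pt's lanes, which only tu,mu preserves
declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i32)

define <vscale x 1 x i32> @set_lane0(<vscale x 1 x i32> %pt, i32 %x, i32 %vl) nounwind {
entry:
  %r = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %pt, i32 %x, i32 %vl)
  ret <vscale x 1 x i32> %r
}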
44 changes: 22 additions & 22 deletions llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
@@ -6,7 +6,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i64)
define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i64)
define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i64)
define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i64)
define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i
define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i
define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i
define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -97,7 +97,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16,
define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -110,7 +110,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16,
define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -123,7 +123,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16,
define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16,
define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i1
define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i1
define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -175,7 +175,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32,
define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -188,7 +188,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32,
define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32,
define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32,
define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i3
define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -240,7 +240,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64,
define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -253,7 +253,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64,
define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -266,7 +266,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64,
define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
@@ -279,7 +279,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64,
define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll

Large diffs are not rendered by default.

84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll

Large diffs are not rendered by default.

72 changes: 36 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll

Large diffs are not rendered by default.

72 changes: 36 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll

Large diffs are not rendered by default.

72 changes: 36 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll

Large diffs are not rendered by default.

72 changes: 36 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll

Large diffs are not rendered by default.
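The reduction tests listed above flip ta,mu to tu,mu for the same reason: a .vs reduction deposits its scalar result in element 0 of the destination and treats every other destination element as tail, and the llvm.riscv.vred*/vwred* intrinsics likewise take a passthru operand that must survive in those lanes. A hedged sketch of the pattern the updated checks expect (register numbers are illustrative):

; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10

Here v8[0] receives v10[0] plus the sum of v9's first vl elements, while v8[1..] is left undisturbed.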

8 changes: 8 additions & 0 deletions llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -578,14 +578,18 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV32MV-NEXT: vsll.vi v26, v25, 1
; RV32MV-NEXT: addi a1, zero, 9
; RV32MV-NEXT: vmv.v.i v27, 10
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV32MV-NEXT: vmv.s.x v27, a1
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV32MV-NEXT: vsll.vv v26, v26, v27
; RV32MV-NEXT: addi a1, zero, 2047
; RV32MV-NEXT: vand.vx v25, v25, a1
; RV32MV-NEXT: vmv.v.i v27, 0
; RV32MV-NEXT: addi a2, zero, 1
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV32MV-NEXT: vmv1r.v v28, v27
; RV32MV-NEXT: vmv.s.x v28, a2
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV32MV-NEXT: lui a2, %hi(.LCPI4_1)
; RV32MV-NEXT: addi a2, a2, %lo(.LCPI4_1)
; RV32MV-NEXT: vle16.v v29, (a2)
@@ -639,14 +643,18 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV64MV-NEXT: vsll.vi v26, v25, 1
; RV64MV-NEXT: addi a1, zero, 9
; RV64MV-NEXT: vmv.v.i v27, 10
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV64MV-NEXT: vmv.s.x v27, a1
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV64MV-NEXT: vsll.vv v26, v26, v27
; RV64MV-NEXT: addi a1, zero, 2047
; RV64MV-NEXT: vand.vx v25, v25, a1
; RV64MV-NEXT: vmv.v.i v27, 0
; RV64MV-NEXT: addi a2, zero, 1
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV64MV-NEXT: vmv1r.v v28, v27
; RV64MV-NEXT: vmv.s.x v28, a2
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV64MV-NEXT: lui a2, %hi(.LCPI4_1)
; RV64MV-NEXT: addi a2, a2, %lo(.LCPI4_1)
; RV64MV-NEXT: vle16.v v29, (a2)
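In this last test the surrounding vector code stays tail-agnostic, so the compiler now brackets each vmv.s.x with a pair of policy toggles. vsetvli zero, zero, <vtype> (destination and AVL registers both x0) keeps the current vl and rewrites only vtype, so the round trip is cheap. With comments added, the inserted RV32MV pattern reads (registers as in the block above):

vsetvli zero, zero, e16,mf2,tu,mu    # same vl, switch tail policy to undisturbed
vmv.s.x v27, a1                      # overwrite lane 0 of the vmv.v.i splat with 9
vsetvli zero, zero, e16,mf2,ta,mu    # back to tail-agnostic for the vsll.vv that follows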