@@ -6,7 +6,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i64)
 define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -19,7 +19,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i64)
 define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -32,7 +32,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i64)
 define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -45,7 +45,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i64)
 define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i64)
 define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i64)
 define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i64)
 define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -97,7 +97,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, i64)
 define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -110,7 +110,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, i64)
 define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -123,7 +123,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, i64)
 define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, i64)
 define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, i64)
 define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, i64)
 define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -175,7 +175,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i64)
 define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -188,7 +188,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i64)
 define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, i64)
 define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, i64)
 define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, i64)
 define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -240,7 +240,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, i64)
 define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -253,7 +253,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64, i64)
 define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -266,7 +266,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64, i64)
 define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -279,7 +279,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64, i64)
 define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vsetvli zero, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
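
Note: every collapsed function body in this diff follows the same shape as its visible declare/define lines; a minimal sketch of the first case, assuming the usual autogenerated (update_llc_test_checks.py-style) test layout, with the value name %a purely illustrative:

define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
entry:
  ; Insert scalar %1 into element 0 of the passthru vector %0, under vector length %2.
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2)
  ret <vscale x 1 x i8> %a
}

The ta -> tu flip in every hunk reflects that vmv.s.x writes only element 0 of v8: the remaining elements are tail elements, so under tail-undisturbed (tu) they keep the values of the passthru operand %0, whereas tail-agnostic (ta) would allow them to become undefined.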