diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
index caf0ae603fda9..ed27c9c7eb344 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
@@ -135,3 +135,56 @@ define <6 x i1> @load_v6i1(ptr %p) {
   %x = load <6 x i1>, ptr %p
   ret <6 x i1> %x
 }
+
+
+define <4 x i32> @exact_vlen_i32_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i32_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %p
+  ret <4 x i32> %v
+}
+
+define <16 x i8> @exact_vlen_i8_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %p
+  ret <16 x i8> %v
+}
+
+define <32 x i8> @exact_vlen_i8_m2(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %p
+  ret <32 x i8> %v
+}
+
+define <128 x i8> @exact_vlen_i8_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <128 x i8>, ptr %p
+  ret <128 x i8> %v
+}
+
+define <16 x i64> @exact_vlen_i64_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i64_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %v = load <16 x i64>, ptr %p
+  ret <16 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
index 32d26827f989e..7c6c70221d851 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
@@ -237,6 +237,65 @@ define void @store_constant_v2i8_volatile(ptr %p) {
   store volatile <2 x i8> , ptr %p
   ret void
 }
+
+
+define void @exact_vlen_i32_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i32_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <4 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i8_m1(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i8> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i8_m2(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <32 x i8> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i8_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i8_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <128 x i8> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @exact_vlen_i64_m8(ptr %p) vscale_range(2,2) {
+; CHECK-LABEL: exact_vlen_i64_m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  store <16 x i64> zeroinitializer, ptr %p
+  ret void
+}
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 1114c7657c63d..1ea8925ba415b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -594,6 +594,40 @@ bb:
   ret i64 %tmp2
 }
 
+
+define void @add_v128i8(ptr %x, ptr %y) vscale_range(2,2) {
+; CHECK-LABEL: add_v128i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 128
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v16, (a1)
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <128 x i8>, ptr %x
+  %b = load <128 x i8>, ptr %y
+  %c = add <128 x i8> %a, %b
+  store <128 x i8> %c, ptr %x
+  ret void
+}
+
+define void @add_v16i64(ptr %x, ptr %y) vscale_range(2,2) {
+; CHECK-LABEL: add_v16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v16, (a1)
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %a = load <16 x i64>, ptr %x
+  %b = load <16 x i64>, ptr %y
+  %c = add <16 x i64> %a, %b
+  store <16 x i64> %c, ptr %x
+  ret void
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,