diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
index e13604ee6b7f3..d9a13afbce6ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
@@ -82,6 +82,18 @@ define {<16 x i8>, <16 x i8>} @vector_deinterleave_load_v16i8_v32i8(ptr %p) {
   ret {<16 x i8>, <16 x i8>} %retval
 }
 
+; FIXME: Shouldn't be lowered to vlseg because it's unaligned
+define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16_align1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vec = load <16 x i16>, ptr %p, align 1
+  %retval = call {<8 x i16>, <8 x i16>} @llvm.experimental.vector.deinterleave2.v16i16(<16 x i16> %vec)
+  ret {<8 x i16>, <8 x i16>} %retval
+}
+
 define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16(ptr %p) {
 ; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
index e2ff4ecd18403..25c7e851f422b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
@@ -29,6 +29,18 @@ define void @vector_interleave_store_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b, ptr
   ret void
 }
 
+; FIXME: Shouldn't be lowered to vsseg because it's unaligned
+define void @vector_interleave_store_v16i16_v8i16_align1(<8 x i16> %a, <8 x i16> %b, ptr %p) {
+; CHECK-LABEL: vector_interleave_store_v16i16_v8i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
+  store <16 x i16> %res, ptr %p, align 1
+  ret void
+}
+
 define void @vector_interleave_store_v16i16_v8i16(<8 x i16> %a, <8 x i16> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_v16i16_v8i16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 5536a6ebc1510..326fd78c3cc3e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -39,6 +39,18 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_load_nxv16i
   ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %retval
 }
 
+; FIXME: Shouldn't be lowered to vlseg because it's unaligned
+define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16_align1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vlseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 16 x i16>, ptr %p, align 1
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
 define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16(ptr %p) {
 ; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 50dd979bc7991..909dc3461c5ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -32,6 +32,18 @@ define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vsc
   ret void
 }
 
+; FIXME: Shouldn't be lowered to vsseg because it's unaligned
+define void @vector_interleave_store_nxv16i16_nxv8i16_align1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
+; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  store <vscale x 16 x i16> %res, ptr %p, align 1
+  ret void
+}
+
 define void @vector_interleave_store_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16:
 ; CHECK:       # %bb.0: