diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll index 13a836e8a7552..63fd892058811 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vp-vector-interleaved-access.ll @@ -2,20 +2,12 @@ ; RUN: llc < %s -mtriple=riscv32 -mattr=+v,m -O2 | FileCheck -check-prefixes=CHECK,RV32 %s ; RUN: llc < %s -mtriple=riscv64 -mattr=+v,m -O2 | FileCheck -check-prefixes=CHECK,RV64 %s -define {, } @load_factor2_v2(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor2_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg2e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor2_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg2e32.v v8, (a0) -; RV64-NEXT: ret +define {, } @load_factor2_v2(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor2_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 2 %wide.masked.load = call @llvm.vp.load.nxv4i32.p0(ptr %ptr, splat (i1 true), i32 %rvl) %deinterleaved.results = call { , } @llvm.vector.deinterleave2.nxv4i32( %wide.masked.load) @@ -26,20 +18,12 @@ define {, } @load_factor2_v2(ptr %ptr, i32 % ret { , } %res1 } -define {, , } @load_factor3_v2(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor3_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg3e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor3_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg3e32.v v8, (a0) -; RV64-NEXT: ret +define {, , } @load_factor3_v2(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor3_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 3 %wide.masked.load = call @llvm.vp.load(ptr %ptr, splat (i1 true), i32 %rvl) %deinterleaved.results = call { , , } @llvm.vector.deinterleave3( %wide.masked.load) @@ -52,22 +36,13 @@ define {, , } @load_factor ret { , , } %res1 } -define {, } @load_factor3_partial(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor3_partial: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg3e32.v v7, (a0) -; RV32-NEXT: vmv1r.v v8, v7 -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor3_partial: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg3e32.v v7, (a0) -; RV64-NEXT: vmv1r.v v8, v7 -; RV64-NEXT: ret +define {, } @load_factor3_partial(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor3_partial: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 3 %wide.masked.load = call @llvm.vp.load(ptr %ptr, splat (i1 true), i32 %rvl) %deinterleaved.results = call { , , } @llvm.vector.deinterleave3( %wide.masked.load) @@ -79,41 +54,22 @@ define {, } @load_factor3_partial(ptr %ptr, } ; InterleavedAccess should kick in even if the users of deinterleave intrinsic are not extractvalue. 
-define {, } @load_factor3_no_extract(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor3_no_extract: -; RV32: # %bb.0: -; RV32-NEXT: li a2, 12 -; RV32-NEXT: beq a1, a2, .LBB3_2 -; RV32-NEXT: # %bb.1: # %bb0 -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg3e32.v v7, (a0) -; RV32-NEXT: j .LBB3_3 -; RV32-NEXT: .LBB3_2: # %bb1 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vlseg3e32.v v7, (a0) -; RV32-NEXT: .LBB3_3: # %merge -; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma -; RV32-NEXT: vmv1r.v v8, v7 -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor3_no_extract: -; RV64: # %bb.0: -; RV64-NEXT: sext.w a2, a1 -; RV64-NEXT: li a3, 12 -; RV64-NEXT: beq a2, a3, .LBB3_2 -; RV64-NEXT: # %bb.1: # %bb0 -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg3e32.v v7, (a0) -; RV64-NEXT: j .LBB3_3 -; RV64-NEXT: .LBB3_2: # %bb1 -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV64-NEXT: vlseg3e32.v v7, (a0) -; RV64-NEXT: .LBB3_3: # %merge -; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma -; RV64-NEXT: vmv1r.v v8, v7 -; RV64-NEXT: ret +define {, } @load_factor3_no_extract(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor3_no_extract: +; CHECK: # %bb.0: +; CHECK-NEXT: li a2, 12 +; CHECK-NEXT: beq a1, a2, .LBB3_2 +; CHECK-NEXT: # %bb.1: # %bb0 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: j .LBB3_3 +; CHECK-NEXT: .LBB3_2: # %bb1 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vlseg3e32.v v7, (a0) +; CHECK-NEXT: .LBB3_3: # %merge +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: ret %p = icmp ne i32 %evl, 12 br i1 %p, label %bb0, label %bb1 @@ -137,20 +93,12 @@ merge: ret { , } %res1 } -define {, , , } @load_factor4_v2(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor4_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg4e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor4_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg4e32.v v8, (a0) -; RV64-NEXT: ret +define {, , , } @load_factor4_v2(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor4_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 4 %wide.masked.load = call @llvm.vp.load.nxv8i32.p0(ptr %ptr, splat (i1 true), i32 %rvl) %d = call { , , , } @llvm.vector.deinterleave4.nxv8i32( %wide.masked.load) @@ -166,20 +114,12 @@ define {, , , , , , } %res3 } -define {, , , , } @load_factor5_v2(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor5_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg5e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor5_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg5e32.v v8, (a0) -; RV64-NEXT: ret +define {, , , , } @load_factor5_v2(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor5_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg5e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 5 %wide.masked.load = call @llvm.vp.load(ptr %ptr, splat (i1 true), i32 %rvl) %deinterleaved.results = call { , , , , } @llvm.vector.deinterleave5( %wide.masked.load) @@ -196,20 +136,12 
@@ define {, , , , , , , } %res4 } -define {, , , , , , } @load_factor7_v2(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor7_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg7e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor7_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg7e32.v v8, (a0) -; RV64-NEXT: ret +define {, , , , , , } @load_factor7_v2(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor7_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg7e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 7 %wide.masked.load = call @llvm.vp.load(ptr %ptr, splat (i1 true), i32 %rvl) %deinterleaved.results = call { , , , , , , } @llvm.vector.deinterleave7( %wide.masked.load) @@ -230,20 +162,12 @@ define {, , , , , , , , , } %res6 } -define {, , , , , , , } @load_factor8_v2(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor8_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg8e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor8_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg8e32.v v8, (a0) -; RV64-NEXT: ret +define {, , , , , , , } @load_factor8_v2(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor8_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg8e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 8 %wide.masked.load = call @llvm.vp.load.nxv16i32.p0(ptr %ptr, splat (i1 true), i32 %rvl) %d = call { , , , , , , , } @llvm.vector.deinterleave8.nxv16i32( %wide.masked.load) @@ -267,47 +191,31 @@ define {, , , , , , , , , , } %res7 } -define void @store_factor2_v2( %v0, %v1, ptr %ptr, i32 %evl) { -; RV32-LABEL: store_factor2_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vsseg2e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: store_factor2_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vsseg2e32.v v8, (a0) -; RV64-NEXT: ret +define void @store_factor2_v2( %v0, %v1, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: store_factor2_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 2 %interleaved.vec = call @llvm.vector.interleave2.nxv2i32( %v0, %v1) call void @llvm.vp.store.nxv2i32.p0( %interleaved.vec, ptr %ptr, splat (i1 true), i32 %rvl) ret void } -define void @store_factor3_v2( %v0, %v1, %v2, ptr %ptr, i32 %evl) { -; RV32-LABEL: store_factor3_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vsseg3e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: store_factor3_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vsseg3e32.v v8, (a0) -; RV64-NEXT: ret +define void @store_factor3_v2( %v0, %v1, %v2, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: store_factor3_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 3 %interleaved.vec = call @llvm.vector.interleave3( %v0, %v1, %v2) call void @llvm.vp.store( %interleaved.vec, ptr %ptr, splat (i1 true), 
i32 %rvl) ret void } -define void @store_factor4_v2( %v0, %v1, ptr %ptr, i32 %evl) { +define void @store_factor4_v2( %v0, %v1, ptr %ptr, i32 zeroext %evl) { ; RV32-LABEL: store_factor4_v2: ; RV32: # %bb.0: ; RV32-NEXT: slli a1, a1, 3 @@ -333,92 +241,54 @@ define void @store_factor4_v2( %v0, %v1, pt ret void } -define void @store_factor5_v2( %v0, %v1, %v2, %v3, %v4, ptr %ptr, i32 %evl) { -; RV32-LABEL: store_factor5_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vsseg5e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: store_factor5_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vsseg5e32.v v8, (a0) -; RV64-NEXT: ret +define void @store_factor5_v2( %v0, %v1, %v2, %v3, %v4, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: store_factor5_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 5 %interleaved.vec = call @llvm.vector.interleave5( %v0, %v1, %v2, %v3, %v4) call void @llvm.vp.store( %interleaved.vec, ptr %ptr, splat (i1 true), i32 %rvl) ret void } -define void @store_factor7_v2( %v0, %v1, %v2, %v3, %v4, %v5, %v6, ptr %ptr, i32 %evl) { -; RV32-LABEL: store_factor7_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vsseg7e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: store_factor7_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vsseg7e32.v v8, (a0) -; RV64-NEXT: ret +define void @store_factor7_v2( %v0, %v1, %v2, %v3, %v4, %v5, %v6, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: store_factor7_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 7 %interleaved.vec = call @llvm.vector.interleave7( %v0, %v1, %v2, %v3, %v4, %v5, %v6) call void @llvm.vp.store( %interleaved.vec, ptr %ptr, splat (i1 true), i32 %rvl) ret void } -define void @store_factor8_v2( %v0, %v1, ptr %ptr, i32 %evl) { -; RV32-LABEL: store_factor8_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vmv1r.v v10, v8 -; RV32-NEXT: vmv1r.v v11, v9 -; RV32-NEXT: vmv1r.v v12, v8 -; RV32-NEXT: vmv1r.v v13, v9 -; RV32-NEXT: vmv1r.v v14, v8 -; RV32-NEXT: vmv1r.v v15, v9 -; RV32-NEXT: vsseg8e32.v v8, (a0) -; RV32-NEXT: ret -; -; RV64-LABEL: store_factor8_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vmv1r.v v10, v8 -; RV64-NEXT: vmv1r.v v11, v9 -; RV64-NEXT: vmv1r.v v12, v8 -; RV64-NEXT: vmv1r.v v13, v9 -; RV64-NEXT: vmv1r.v v14, v8 -; RV64-NEXT: vmv1r.v v15, v9 -; RV64-NEXT: vsseg8e32.v v8, (a0) -; RV64-NEXT: ret +define void @store_factor8_v2( %v0, %v1, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: store_factor8_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v9 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v9 +; CHECK-NEXT: vmv1r.v v14, v8 +; CHECK-NEXT: vmv1r.v v15, v9 +; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 8 %interleaved.vec = call @llvm.vector.interleave8.nxv8i32( %v0, %v1, %v0, %v1, %v0, %v1, %v0, %v1) call void @llvm.vp.store.nxv8i32.p0( %interleaved.vec, ptr %ptr, splat (i1 true), i32 %rvl) ret void } 
-define {, } @masked_load_factor2_v2( %mask, ptr %ptr, i32 %evl) { -; RV32-LABEL: masked_load_factor2_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg2e32.v v8, (a0), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_factor2_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg2e32.v v8, (a0), v0.t -; RV64-NEXT: ret +define {, } @masked_load_factor2_v2( %mask, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: masked_load_factor2_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 2 %interleaved.mask = tail call @llvm.vector.interleave2.nxv4i1( %mask, %mask) %wide.masked.load = tail call @llvm.vp.load.nxv4i32.p0(ptr %ptr, %interleaved.mask, i32 %rvl) @@ -430,20 +300,12 @@ define {, } @masked_load_factor2_v2(, } %res1 } -define {, , , } @masked_load_factor4_v2( %mask, ptr %ptr, i32 %evl) { -; RV32-LABEL: masked_load_factor4_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg4e32.v v8, (a0), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_factor4_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlseg4e32.v v8, (a0), v0.t -; RV64-NEXT: ret +define {, , , } @masked_load_factor4_v2( %mask, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: masked_load_factor4_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 4 %interleaved.mask = call @llvm.vector.interleave4.nxv8i1( %mask, %mask, %mask, %mask) %wide.masked.load = call @llvm.vp.load.nxv8i32.p0(ptr %ptr, %interleaved.mask, i32 %rvl) @@ -460,23 +322,13 @@ define {, , , , , , } %res3 } -define void @masked_store_factor2_v2( %mask, %v0, %v1, ptr %ptr, i32 %evl) { -; RV32-LABEL: masked_store_factor2_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vmv1r.v v9, v8 -; RV32-NEXT: vsseg2e32.v v8, (a0), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_factor2_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma -; RV64-NEXT: vmv1r.v v9, v8 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vsseg2e32.v v8, (a0), v0.t -; RV64-NEXT: ret +define void @masked_store_factor2_v2( %mask, %v0, %v1, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: masked_store_factor2_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v9, v8 +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 2 %interleaved.mask = tail call @llvm.vector.interleave2.nxv2i1( %mask, %mask) %interleaved.vec = tail call @llvm.vector.interleave2.nxv2i32( %v0, %v0) @@ -484,22 +336,13 @@ define void @masked_store_factor2_v2( %mask, ret void } -define void @masked_load_store_factor2_v2_shared_mask( %mask, ptr %ptr, i32 %evl) { -; RV32-LABEL: masked_load_store_factor2_v2_shared_mask: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlseg2e32.v v8, (a0), v0.t -; RV32-NEXT: vsseg2e32.v v8, (a0), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_load_store_factor2_v2_shared_mask: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; 
RV64-NEXT: vlseg2e32.v v8, (a0), v0.t -; RV64-NEXT: vsseg2e32.v v8, (a0), v0.t -; RV64-NEXT: ret +define void @masked_load_store_factor2_v2_shared_mask( %mask, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: masked_load_store_factor2_v2_shared_mask: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 2 %interleaved.mask = tail call @llvm.vector.interleave2.nxv4i1( %mask, %mask) %wide.masked.load = tail call @llvm.vp.load.nxv4i32.p0(ptr %ptr, %interleaved.mask, i32 %rvl) @@ -511,7 +354,7 @@ define void @masked_load_store_factor2_v2_shared_mask( %mask, p ret void } -define i32 @masked_load_store_factor2_v2_shared_mask_extract( %mask, ptr %ptr, i32 %evl) { +define i32 @masked_load_store_factor2_v2_shared_mask_extract( %mask, ptr %ptr, i32 zeroext %evl) { ; RV32-LABEL: masked_load_store_factor2_v2_shared_mask_extract: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma @@ -579,16 +422,14 @@ define i32 @masked_load_store_factor2_v2_shared_mask_extract( % ; RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; RV64-NEXT: vle32.v v10, (a0), v0.t ; RV64-NEXT: li a2, 32 -; RV64-NEXT: slli a3, a1, 32 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV64-NEXT: vsetvli a3, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wx v13, v10, a2 -; RV64-NEXT: vmv.x.s a1, v10 ; RV64-NEXT: vnsrl.wi v12, v10, 0 -; RV64-NEXT: srli a3, a3, 32 +; RV64-NEXT: vmv.x.s a2, v10 ; RV64-NEXT: vmv1r.v v0, v8 -; RV64-NEXT: vsetvli zero, a3, e32, m1, ta, ma +; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; RV64-NEXT: vsseg2e32.v v12, (a0), v0.t -; RV64-NEXT: mv a0, a1 +; RV64-NEXT: mv a0, a2 ; RV64-NEXT: ret %rvl = mul nuw i32 %evl, 2 %interleaved.mask = tail call @llvm.vector.interleave2.nxv4i1( %mask, %mask) @@ -602,24 +443,14 @@ define i32 @masked_load_store_factor2_v2_shared_mask_extract( % ret i32 %r0 } -define void @masked_store_factor4_v2( %mask, %v0, %v1, ptr %ptr, i32 %evl) { -; RV32-LABEL: masked_store_factor4_v2: -; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV32-NEXT: vmv1r.v v10, v8 -; RV32-NEXT: vmv1r.v v11, v9 -; RV32-NEXT: vsseg4e32.v v8, (a0), v0.t -; RV32-NEXT: ret -; -; RV64-LABEL: masked_store_factor4_v2: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, ta, ma -; RV64-NEXT: vmv1r.v v10, v8 -; RV64-NEXT: vmv1r.v v11, v9 -; RV64-NEXT: vsseg4e32.v v8, (a0), v0.t -; RV64-NEXT: ret +define void @masked_store_factor4_v2( %mask, %v0, %v1, ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: masked_store_factor4_v2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v9 +; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 4 %interleaved.mask = call @llvm.vector.interleave4.nxv4i1( %mask, %mask, %mask, %mask) %interleaved.vec = call @llvm.vector.interleave4.nxv2i32( %v0, %v1, %v0, %v1) @@ -627,7 +458,7 @@ define void @masked_store_factor4_v2( %mask, ret void } -define @load_factor2_oneactive(ptr %ptr, i32 %evl) { +define @load_factor2_oneactive(ptr %ptr, i32 zeroext %evl) { ; RV32-LABEL: load_factor2_oneactive: ; RV32: # %bb.0: ; RV32-NEXT: slli a1, a1, 2 @@ -650,24 +481,14 @@ define @load_factor2_oneactive(ptr %ptr, i32 %evl) { ret %t0 } -define @load_factor5_oneactive(ptr %ptr, i32 %evl) { -; RV32-LABEL: load_factor5_oneactive: -; RV32: # %bb.0: -; RV32-NEXT: addi a0, a0, 12 -; 
RV32-NEXT: li a2, 20 -; RV32-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV32-NEXT: vlse32.v v8, (a0), a2 -; RV32-NEXT: ret -; -; RV64-LABEL: load_factor5_oneactive: -; RV64: # %bb.0: -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: addi a0, a0, 12 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: li a2, 20 -; RV64-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; RV64-NEXT: vlse32.v v8, (a0), a2 -; RV64-NEXT: ret +define @load_factor5_oneactive(ptr %ptr, i32 zeroext %evl) { +; CHECK-LABEL: load_factor5_oneactive: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, a0, 12 +; CHECK-NEXT: li a2, 20 +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a2 +; CHECK-NEXT: ret %rvl = mul nuw i32 %evl, 5 %wide.masked.load = call @llvm.vp.load(ptr %ptr, splat (i1 true), i32 %rvl) %deinterleaved.results = call { , , , , } @llvm.vector.deinterleave5( %wide.masked.load) @@ -678,7 +499,7 @@ define @load_factor5_oneactive(ptr %ptr, i32 %evl) { ; Negative tests -define {, } @not_same_mask( %mask0, %mask1, ptr %ptr, i32 %evl) { +define {, } @not_same_mask( %mask0, %mask1, ptr %ptr, i32 zeroext %evl) { ; RV32-LABEL: not_same_mask: ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma @@ -763,54 +584,25 @@ define {, } @not_same_mask( } ; EVL should be a multiple of factor -define {, , , } @invalid_evl(ptr %ptr, i32 %evl) { -; RV32-LABEL: invalid_evl: -; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: slli a2, a2, 2 -; RV32-NEXT: sub sp, sp, a2 -; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb -; RV32-NEXT: ori a1, a1, 1 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vs4r.v v8, (a0) -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; RV32-NEXT: vlseg4e32.v v8, (a0) -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 2 -; RV32-NEXT: add sp, sp, a0 -; RV32-NEXT: .cfi_def_cfa sp, 16 -; RV32-NEXT: addi sp, sp, 16 -; RV32-NEXT: .cfi_def_cfa_offset 0 -; RV32-NEXT: ret -; -; RV64-LABEL: invalid_evl: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: .cfi_def_cfa_offset 16 -; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: slli a2, a2, 2 -; RV64-NEXT: sub sp, sp, a2 -; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb -; RV64-NEXT: ori a1, a1, 1 -; RV64-NEXT: slli a1, a1, 32 -; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vs4r.v v8, (a0) -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; RV64-NEXT: vlseg4e32.v v8, (a0) -; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: slli a0, a0, 2 -; RV64-NEXT: add sp, sp, a0 -; RV64-NEXT: .cfi_def_cfa sp, 16 -; RV64-NEXT: addi sp, sp, 16 -; RV64-NEXT: .cfi_def_cfa_offset 0 -; RV64-NEXT: ret +define {, , , } @invalid_evl(ptr %ptr, i32 zeroext %evl) nounwind { +; CHECK-LABEL: invalid_evl: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 2 +; CHECK-NEXT: sub sp, sp, a2 +; CHECK-NEXT: ori a1, a1, 1 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vlseg4e32.v v8, (a0) +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; 
CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret %rvl = or i32 %evl, 1 %wide.masked.load = call @llvm.vp.load.nxv8i32.p0(ptr %ptr, splat (i1 true), i32 %rvl) %d = call { , , , } @llvm.vector.deinterleave4.nxv8i32( %wide.masked.load) @@ -826,5 +618,3 @@ define {, , , , , , } %res3 } -;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; CHECK: {{.*}}
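
The scalable-vector operand types inside the angle brackets do not survive in the diff text above. As a reference for reading the testcases, below is a sketch of what the first test, load_factor2_v2, presumably looks like after this change. The <vscale x N x i32> types are inferred from the intrinsic name suffixes (nxv4i32, deinterleave2) rather than copied from the source, so treat them as an assumption; the CHECK lines are taken verbatim from the diff.

; Sketch (assumed types): a factor-2 deinterleaved VP load that the
; InterleavedAccess pass lowers to a single vlseg2e32.v. With %evl marked
; zeroext, RV64 no longer needs the slli/srli zero-extension, so RV32 and
; RV64 share one CHECK prefix.
define {<vscale x 2 x i32>, <vscale x 2 x i32>} @load_factor2_v2(ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: load_factor2_v2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vlseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
  %rvl = mul nuw i32 %evl, 2
  %wide.masked.load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %rvl)
  %deinterleaved.results = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32> %wide.masked.load)
  %t0 = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } %deinterleaved.results, 0
  %t1 = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } %deinterleaved.results, 1
  %res0 = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> %t0, 0
  %res1 = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } %res0, <vscale x 2 x i32> %t1, 1
  ret { <vscale x 2 x i32>, <vscale x 2 x i32> } %res1
}

The remaining tests follow the same shape at factors 3 through 8, for stores via llvm.vector.interleaveN plus llvm.vp.store, and for masked variants with an interleaved all-equal mask; in each case %rvl is the element count scaled by the factor.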