110 changes: 90 additions & 20 deletions llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -126,6 +126,16 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <
ret <vscale x 8 x i8> %load
}

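; A constant stride equal to the element size in bytes is a unit-stride access,
; so the _unit_stride tests below should select a plain unit-stride vle load
; rather than a strided vlse.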
define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i8> %load
}

define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i8_allones_mask:
; CHECK: # %bb.0:
@@ -186,6 +196,16 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
ret <vscale x 4 x i16> %load
}

define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i16> %load
}

declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -222,6 +242,16 @@ define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride,
ret <vscale x 2 x i32> %load
}

define <vscale x 2 x i32> @strided_vpload_nxv2i32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i32_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i32> %load
}

declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
@@ -270,6 +300,16 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride,
ret <vscale x 1 x i64> %load
}

define <vscale x 1 x i64> @strided_vpload_nxv1i64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i64_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i64> %load
}

define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i64_allones_mask:
; CHECK: # %bb.0:
@@ -366,6 +406,16 @@ define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride
ret <vscale x 4 x half> %load
}

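; The floating-point unit-stride variants below carry the same lowering
; expectation as the integer tests above: a vle load, not a vlse.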
define <vscale x 4 x half> @strided_vpload_nxv4f16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4f16_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x half> %load
}

declare <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -402,6 +452,16 @@ define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %strid
ret <vscale x 2 x float> %load
}

define <vscale x 2 x float> @strided_vpload_nxv2f32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2f32_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x float> %load
}

declare <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
@@ -450,6 +510,16 @@ define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stri
ret <vscale x 1 x double> %load
}

define <vscale x 1 x double> @strided_vpload_nxv1f64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1f64_unit_stride:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %load
}

declare <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -533,10 +603,10 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: sltu a5, a3, a2
; CHECK-RV32-NEXT: addi a5, a5, -1
; CHECK-RV32-NEXT: and a2, a5, a2
-; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_2
+; CHECK-RV32-NEXT: bltu a3, a4, .LBB49_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: mv a3, a4
-; CHECK-RV32-NEXT: .LBB42_2:
+; CHECK-RV32-NEXT: .LBB49_2:
; CHECK-RV32-NEXT: mul a5, a3, a1
; CHECK-RV32-NEXT: add a5, a0, a5
; CHECK-RV32-NEXT: srli a4, a4, 3
@@ -558,10 +628,10 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: sltu a5, a2, a3
; CHECK-RV64-NEXT: addi a5, a5, -1
; CHECK-RV64-NEXT: and a3, a5, a3
-; CHECK-RV64-NEXT: bltu a2, a4, .LBB42_2
+; CHECK-RV64-NEXT: bltu a2, a4, .LBB49_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: mv a2, a4
-; CHECK-RV64-NEXT: .LBB42_2:
+; CHECK-RV64-NEXT: .LBB49_2:
; CHECK-RV64-NEXT: mul a5, a2, a1
; CHECK-RV64-NEXT: add a5, a0, a5
; CHECK-RV64-NEXT: srli a4, a4, 3
@@ -586,10 +656,10 @@ define <vscale x 16 x double> @strided_load_nxv16f64_allones_mask(ptr %ptr, i64
; CHECK-RV32-NEXT: sltu a5, a3, a2
; CHECK-RV32-NEXT: addi a5, a5, -1
; CHECK-RV32-NEXT: and a2, a5, a2
-; CHECK-RV32-NEXT: bltu a3, a4, .LBB43_2
+; CHECK-RV32-NEXT: bltu a3, a4, .LBB50_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: mv a3, a4
-; CHECK-RV32-NEXT: .LBB43_2:
+; CHECK-RV32-NEXT: .LBB50_2:
; CHECK-RV32-NEXT: mul a4, a3, a1
; CHECK-RV32-NEXT: add a4, a0, a4
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
@@ -605,10 +675,10 @@ define <vscale x 16 x double> @strided_load_nxv16f64_allones_mask(ptr %ptr, i64
; CHECK-RV64-NEXT: sltu a5, a2, a3
; CHECK-RV64-NEXT: addi a5, a5, -1
; CHECK-RV64-NEXT: and a3, a5, a3
-; CHECK-RV64-NEXT: bltu a2, a4, .LBB43_2
+; CHECK-RV64-NEXT: bltu a2, a4, .LBB50_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: mv a2, a4
-; CHECK-RV64-NEXT: .LBB43_2:
+; CHECK-RV64-NEXT: .LBB50_2:
; CHECK-RV64-NEXT: mul a4, a2, a1
; CHECK-RV64-NEXT: add a4, a0, a4
; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
@@ -635,19 +705,19 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: slli a7, a2, 1
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: mv a6, a3
-; CHECK-RV32-NEXT: bltu a3, a7, .LBB44_2
+; CHECK-RV32-NEXT: bltu a3, a7, .LBB51_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: mv a6, a7
-; CHECK-RV32-NEXT: .LBB44_2:
+; CHECK-RV32-NEXT: .LBB51_2:
; CHECK-RV32-NEXT: sub a5, a6, a2
; CHECK-RV32-NEXT: sltu t0, a6, a5
; CHECK-RV32-NEXT: addi t0, t0, -1
; CHECK-RV32-NEXT: and t0, t0, a5
; CHECK-RV32-NEXT: mv a5, a6
-; CHECK-RV32-NEXT: bltu a6, a2, .LBB44_4
+; CHECK-RV32-NEXT: bltu a6, a2, .LBB51_4
; CHECK-RV32-NEXT: # %bb.3:
; CHECK-RV32-NEXT: mv a5, a2
-; CHECK-RV32-NEXT: .LBB44_4:
+; CHECK-RV32-NEXT: .LBB51_4:
; CHECK-RV32-NEXT: mul t1, a5, a1
; CHECK-RV32-NEXT: add t1, a0, t1
; CHECK-RV32-NEXT: srli t2, a2, 3
@@ -659,10 +729,10 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: sltu a3, a3, a7
; CHECK-RV32-NEXT: addi a3, a3, -1
; CHECK-RV32-NEXT: and a3, a3, a7
-; CHECK-RV32-NEXT: bltu a3, a2, .LBB44_6
+; CHECK-RV32-NEXT: bltu a3, a2, .LBB51_6
; CHECK-RV32-NEXT: # %bb.5:
; CHECK-RV32-NEXT: mv a3, a2
-; CHECK-RV32-NEXT: .LBB44_6:
+; CHECK-RV32-NEXT: .LBB51_6:
; CHECK-RV32-NEXT: mul a6, a6, a1
; CHECK-RV32-NEXT: add a6, a0, a6
; CHECK-RV32-NEXT: srli a2, a2, 2
@@ -682,19 +752,19 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: slli a7, a4, 1
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: mv a6, a2
-; CHECK-RV64-NEXT: bltu a2, a7, .LBB44_2
+; CHECK-RV64-NEXT: bltu a2, a7, .LBB51_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: mv a6, a7
-; CHECK-RV64-NEXT: .LBB44_2:
+; CHECK-RV64-NEXT: .LBB51_2:
; CHECK-RV64-NEXT: sub a5, a6, a4
; CHECK-RV64-NEXT: sltu t0, a6, a5
; CHECK-RV64-NEXT: addi t0, t0, -1
; CHECK-RV64-NEXT: and t0, t0, a5
; CHECK-RV64-NEXT: mv a5, a6
-; CHECK-RV64-NEXT: bltu a6, a4, .LBB44_4
+; CHECK-RV64-NEXT: bltu a6, a4, .LBB51_4
; CHECK-RV64-NEXT: # %bb.3:
; CHECK-RV64-NEXT: mv a5, a4
-; CHECK-RV64-NEXT: .LBB44_4:
+; CHECK-RV64-NEXT: .LBB51_4:
; CHECK-RV64-NEXT: mul t1, a5, a1
; CHECK-RV64-NEXT: add t1, a0, t1
; CHECK-RV64-NEXT: srli t2, a4, 3
@@ -706,10 +776,10 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: sltu a2, a2, a7
; CHECK-RV64-NEXT: addi a2, a2, -1
; CHECK-RV64-NEXT: and a2, a2, a7
-; CHECK-RV64-NEXT: bltu a2, a4, .LBB44_6
+; CHECK-RV64-NEXT: bltu a2, a4, .LBB51_6
; CHECK-RV64-NEXT: # %bb.5:
; CHECK-RV64-NEXT: mv a2, a4
-; CHECK-RV64-NEXT: .LBB44_6:
+; CHECK-RV64-NEXT: .LBB51_6:
; CHECK-RV64-NEXT: mul a6, a6, a1
; CHECK-RV64-NEXT: add a6, a0, a6
; CHECK-RV64-NEXT: srli a4, a4, 2