diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll
deleted file mode 100644
index 00e35f52b5e049..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll
+++ /dev/null
@@ -1,183 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-
-declare <2 x i8> @llvm.experimental.stepvector.v2i8()
-
-define <2 x i8> @stepvector_v2i8() {
-; CHECK-LABEL: stepvector_v2i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.experimental.stepvector.v2i8()
- ret <2 x i8> %v
-}
-
-declare <3 x i8> @llvm.experimental.stepvector.v3i8()
-
-define <3 x i8> @stepvector_v3i8() {
-; CHECK-LABEL: stepvector_v3i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <3 x i8> @llvm.experimental.stepvector.v3i8()
- ret <3 x i8> %v
-}
-
-declare <4 x i8> @llvm.experimental.stepvector.v4i8()
-
-define <4 x i8> @stepvector_v4i8() {
-; CHECK-LABEL: stepvector_v4i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <4 x i8> @llvm.experimental.stepvector.v4i8()
- ret <4 x i8> %v
-}
-
-declare <8 x i8> @llvm.experimental.stepvector.v8i8()
-
-define <8 x i8> @stepvector_v8i8() {
-; CHECK-LABEL: stepvector_v8i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <8 x i8> @llvm.experimental.stepvector.v8i8()
- ret <8 x i8> %v
-}
-
-declare <16 x i8> @llvm.experimental.stepvector.v16i8()
-
-define <16 x i8> @stepvector_v16i8() {
-; CHECK-LABEL: stepvector_v16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <16 x i8> @llvm.experimental.stepvector.v16i8()
- ret <16 x i8> %v
-}
-
-declare <2 x i16> @llvm.experimental.stepvector.v2i16()
-
-define <2 x i16> @stepvector_v2i16() {
-; CHECK-LABEL: stepvector_v2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i16> @llvm.experimental.stepvector.v2i16()
- ret <2 x i16> %v
-}
-
-declare <4 x i16> @llvm.experimental.stepvector.v4i16()
-
-define <4 x i16> @stepvector_v4i16() {
-; CHECK-LABEL: stepvector_v4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <4 x i16> @llvm.experimental.stepvector.v4i16()
- ret <4 x i16> %v
-}
-
-declare <8 x i16> @llvm.experimental.stepvector.v8i16()
-
-define <8 x i16> @stepvector_v8i16() {
-; CHECK-LABEL: stepvector_v8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <8 x i16> @llvm.experimental.stepvector.v8i16()
- ret <8 x i16> %v
-}
-
-declare <16 x i16> @llvm.experimental.stepvector.v16i16()
-
-define <16 x i16> @stepvector_v16i16() {
-; LMULMAX1-LABEL: stepvector_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 8
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
- %v = call <16 x i16> @llvm.experimental.stepvector.v16i16()
- ret <16 x i16> %v
-}
-
-declare <2 x i32> @llvm.experimental.stepvector.v2i32()
-
-define <2 x i32> @stepvector_v2i32() {
-; CHECK-LABEL: stepvector_v2i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i32> @llvm.experimental.stepvector.v2i32()
- ret <2 x i32> %v
-}
-
-declare <4 x i32> @llvm.experimental.stepvector.v4i32()
-
-define <4 x i32> @stepvector_v4i32() {
-; CHECK-LABEL: stepvector_v4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <4 x i32> @llvm.experimental.stepvector.v4i32()
- ret <4 x i32> %v
-}
-
-declare <8 x i32> @llvm.experimental.stepvector.v8i32()
-
-define <8 x i32> @stepvector_v8i32() {
-; LMULMAX1-LABEL: stepvector_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 4
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
- %v = call <8 x i32> @llvm.experimental.stepvector.v8i32()
- ret <8 x i32> %v
-}
-
-declare <16 x i32> @llvm.experimental.stepvector.v16i32()
-
-define <16 x i32> @stepvector_v16i32() {
-; LMULMAX1-LABEL: stepvector_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vi v10, v8, 8
-; LMULMAX1-NEXT: vadd.vi v11, v8, 12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: vadd.vi v10, v8, 8
-; LMULMAX2-NEXT: ret
- %v = call <16 x i32> @llvm.experimental.stepvector.v16i32()
- ret <16 x i32> %v
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll
deleted file mode 100644
index 9f20e259d5e1ad..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll
+++ /dev/null
@@ -1,264 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-
-declare <2 x i8> @llvm.experimental.stepvector.v2i8()
-
-define <2 x i8> @stepvector_v2i8() {
-; CHECK-LABEL: stepvector_v2i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.experimental.stepvector.v2i8()
- ret <2 x i8> %v
-}
-
-declare <3 x i8> @llvm.experimental.stepvector.v3i8()
-
-define <3 x i8> @stepvector_v3i8() {
-; CHECK-LABEL: stepvector_v3i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <3 x i8> @llvm.experimental.stepvector.v3i8()
- ret <3 x i8> %v
-}
-
-declare <4 x i8> @llvm.experimental.stepvector.v4i8()
-
-define <4 x i8> @stepvector_v4i8() {
-; CHECK-LABEL: stepvector_v4i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <4 x i8> @llvm.experimental.stepvector.v4i8()
- ret <4 x i8> %v
-}
-
-declare <8 x i8> @llvm.experimental.stepvector.v8i8()
-
-define <8 x i8> @stepvector_v8i8() {
-; CHECK-LABEL: stepvector_v8i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <8 x i8> @llvm.experimental.stepvector.v8i8()
- ret <8 x i8> %v
-}
-
-declare <16 x i8> @llvm.experimental.stepvector.v16i8()
-
-define <16 x i8> @stepvector_v16i8() {
-; CHECK-LABEL: stepvector_v16i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <16 x i8> @llvm.experimental.stepvector.v16i8()
- ret <16 x i8> %v
-}
-
-declare <2 x i16> @llvm.experimental.stepvector.v2i16()
-
-define <2 x i16> @stepvector_v2i16() {
-; CHECK-LABEL: stepvector_v2i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i16> @llvm.experimental.stepvector.v2i16()
- ret <2 x i16> %v
-}
-
-declare <4 x i16> @llvm.experimental.stepvector.v4i16()
-
-define <4 x i16> @stepvector_v4i16() {
-; CHECK-LABEL: stepvector_v4i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <4 x i16> @llvm.experimental.stepvector.v4i16()
- ret <4 x i16> %v
-}
-
-declare <8 x i16> @llvm.experimental.stepvector.v8i16()
-
-define <8 x i16> @stepvector_v8i16() {
-; CHECK-LABEL: stepvector_v8i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <8 x i16> @llvm.experimental.stepvector.v8i16()
- ret <8 x i16> %v
-}
-
-declare <16 x i16> @llvm.experimental.stepvector.v16i16()
-
-define <16 x i16> @stepvector_v16i16() {
-; LMULMAX1-LABEL: stepvector_v16i16:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 8
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i16:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
- %v = call <16 x i16> @llvm.experimental.stepvector.v16i16()
- ret <16 x i16> %v
-}
-
-declare <2 x i32> @llvm.experimental.stepvector.v2i32()
-
-define <2 x i32> @stepvector_v2i32() {
-; CHECK-LABEL: stepvector_v2i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i32> @llvm.experimental.stepvector.v2i32()
- ret <2 x i32> %v
-}
-
-declare <4 x i32> @llvm.experimental.stepvector.v4i32()
-
-define <4 x i32> @stepvector_v4i32() {
-; CHECK-LABEL: stepvector_v4i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <4 x i32> @llvm.experimental.stepvector.v4i32()
- ret <4 x i32> %v
-}
-
-declare <8 x i32> @llvm.experimental.stepvector.v8i32()
-
-define <8 x i32> @stepvector_v8i32() {
-; LMULMAX1-LABEL: stepvector_v8i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 4
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v8i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
- %v = call <8 x i32> @llvm.experimental.stepvector.v8i32()
- ret <8 x i32> %v
-}
-
-declare <16 x i32> @llvm.experimental.stepvector.v16i32()
-
-define <16 x i32> @stepvector_v16i32() {
-; LMULMAX1-LABEL: stepvector_v16i32:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 4
-; LMULMAX1-NEXT: vadd.vi v10, v8, 8
-; LMULMAX1-NEXT: vadd.vi v11, v8, 12
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i32:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: vadd.vi v10, v8, 8
-; LMULMAX2-NEXT: ret
- %v = call <16 x i32> @llvm.experimental.stepvector.v16i32()
- ret <16 x i32> %v
-}
-
-declare <2 x i64> @llvm.experimental.stepvector.v2i64()
-
-define <2 x i64> @stepvector_v2i64() {
-; CHECK-LABEL: stepvector_v2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: ret
- %v = call <2 x i64> @llvm.experimental.stepvector.v2i64()
- ret <2 x i64> %v
-}
-
-declare <4 x i64> @llvm.experimental.stepvector.v4i64()
-
-define <4 x i64> @stepvector_v4i64() {
-; LMULMAX1-LABEL: stepvector_v4i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 2
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v4i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: ret
- %v = call <4 x i64> @llvm.experimental.stepvector.v4i64()
- ret <4 x i64> %v
-}
-
-declare <8 x i64> @llvm.experimental.stepvector.v8i64()
-
-define <8 x i64> @stepvector_v8i64() {
-; LMULMAX1-LABEL: stepvector_v8i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 2
-; LMULMAX1-NEXT: vadd.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vi v11, v8, 6
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v8i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: vadd.vi v10, v8, 4
-; LMULMAX2-NEXT: ret
- %v = call <8 x i64> @llvm.experimental.stepvector.v8i64()
- ret <8 x i64> %v
-}
-
-declare <16 x i64> @llvm.experimental.stepvector.v16i64()
-
-define <16 x i64> @stepvector_v16i64() {
-; LMULMAX1-LABEL: stepvector_v16i64:
-; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT: vid.v v8
-; LMULMAX1-NEXT: vadd.vi v9, v8, 2
-; LMULMAX1-NEXT: vadd.vi v10, v8, 4
-; LMULMAX1-NEXT: vadd.vi v11, v8, 6
-; LMULMAX1-NEXT: vadd.vi v12, v8, 8
-; LMULMAX1-NEXT: vadd.vi v13, v8, 10
-; LMULMAX1-NEXT: vadd.vi v14, v8, 12
-; LMULMAX1-NEXT: vadd.vi v15, v8, 14
-; LMULMAX1-NEXT: ret
-;
-; LMULMAX2-LABEL: stepvector_v16i64:
-; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; LMULMAX2-NEXT: vid.v v8
-; LMULMAX2-NEXT: vadd.vi v10, v8, 4
-; LMULMAX2-NEXT: vadd.vi v12, v8, 8
-; LMULMAX2-NEXT: vadd.vi v14, v8, 12
-; LMULMAX2-NEXT: ret
- %v = call <16 x i64> @llvm.experimental.stepvector.v16i64()
- ret <16 x i64> %v
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
new file mode 100644
index 00000000000000..58f17d8825bea5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
@@ -0,0 +1,394 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV32LMULMAX1
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV64LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,RV32LMULMAX2
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2,RV64LMULMAX2
+
+declare <2 x i8> @llvm.experimental.stepvector.v2i8()
+
+define <2 x i8> @stepvector_v2i8() {
+; CHECK-LABEL: stepvector_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.experimental.stepvector.v2i8()
+ ret <2 x i8> %v
+}
+
+declare <3 x i8> @llvm.experimental.stepvector.v3i8()
+
+define <3 x i8> @stepvector_v3i8() {
+; CHECK-LABEL: stepvector_v3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <3 x i8> @llvm.experimental.stepvector.v3i8()
+ ret <3 x i8> %v
+}
+
+declare <4 x i8> @llvm.experimental.stepvector.v4i8()
+
+define <4 x i8> @stepvector_v4i8() {
+; CHECK-LABEL: stepvector_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.experimental.stepvector.v4i8()
+ ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.experimental.stepvector.v8i8()
+
+define <8 x i8> @stepvector_v8i8() {
+; CHECK-LABEL: stepvector_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.experimental.stepvector.v8i8()
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.experimental.stepvector.v16i8()
+
+define <16 x i8> @stepvector_v16i8() {
+; CHECK-LABEL: stepvector_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.experimental.stepvector.v16i8()
+ ret <16 x i8> %v
+}
+
+declare <2 x i16> @llvm.experimental.stepvector.v2i16()
+
+define <2 x i16> @stepvector_v2i16() {
+; CHECK-LABEL: stepvector_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.experimental.stepvector.v2i16()
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.experimental.stepvector.v4i16()
+
+define <4 x i16> @stepvector_v4i16() {
+; CHECK-LABEL: stepvector_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.experimental.stepvector.v4i16()
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.experimental.stepvector.v8i16()
+
+define <8 x i16> @stepvector_v8i16() {
+; CHECK-LABEL: stepvector_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.experimental.stepvector.v8i16()
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.experimental.stepvector.v16i16()
+
+define <16 x i16> @stepvector_v16i16() {
+; LMULMAX1-LABEL: stepvector_v16i16:
+; LMULMAX1: # %bb.0:
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-NEXT: vid.v v8
+; LMULMAX1-NEXT: vadd.vi v9, v8, 8
+; LMULMAX1-NEXT: ret
+;
+; LMULMAX2-LABEL: stepvector_v16i16:
+; LMULMAX2: # %bb.0:
+; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; LMULMAX2-NEXT: vid.v v8
+; LMULMAX2-NEXT: ret
+ %v = call <16 x i16> @llvm.experimental.stepvector.v16i16()
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.experimental.stepvector.v2i32()
+
+define <2 x i32> @stepvector_v2i32() {
+; CHECK-LABEL: stepvector_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.experimental.stepvector.v2i32()
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.experimental.stepvector.v4i32()
+
+define <4 x i32> @stepvector_v4i32() {
+; CHECK-LABEL: stepvector_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.experimental.stepvector.v4i32()
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.experimental.stepvector.v8i32()
+
+define <8 x i32> @stepvector_v8i32() {
+; LMULMAX1-LABEL: stepvector_v8i32:
+; LMULMAX1: # %bb.0:
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vid.v v8
+; LMULMAX1-NEXT: vadd.vi v9, v8, 4
+; LMULMAX1-NEXT: ret
+;
+; LMULMAX2-LABEL: stepvector_v8i32:
+; LMULMAX2: # %bb.0:
+; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX2-NEXT: vid.v v8
+; LMULMAX2-NEXT: ret
+ %v = call <8 x i32> @llvm.experimental.stepvector.v8i32()
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.experimental.stepvector.v16i32()
+
+define <16 x i32> @stepvector_v16i32() {
+; LMULMAX1-LABEL: stepvector_v16i32:
+; LMULMAX1: # %bb.0:
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT: vid.v v8
+; LMULMAX1-NEXT: vadd.vi v9, v8, 4
+; LMULMAX1-NEXT: vadd.vi v10, v8, 8
+; LMULMAX1-NEXT: vadd.vi v11, v8, 12
+; LMULMAX1-NEXT: ret
+;
+; LMULMAX2-LABEL: stepvector_v16i32:
+; LMULMAX2: # %bb.0:
+; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX2-NEXT: vid.v v8
+; LMULMAX2-NEXT: vadd.vi v10, v8, 8
+; LMULMAX2-NEXT: ret
+ %v = call <16 x i32> @llvm.experimental.stepvector.v16i32()
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.experimental.stepvector.v2i64()
+
+define <2 x i64> @stepvector_v2i64() {
+; RV32LMULMAX1-LABEL: stepvector_v2i64:
+; RV32LMULMAX1: # %bb.0:
+; RV32LMULMAX1-NEXT: li a0, 1
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
+; RV32LMULMAX1-NEXT: vmv.v.i v8, 0
+; RV32LMULMAX1-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; RV32LMULMAX1-NEXT: vslideup.vi v8, v9, 2
+; RV32LMULMAX1-NEXT: ret
+;
+; RV64LMULMAX1-LABEL: stepvector_v2i64:
+; RV64LMULMAX1: # %bb.0:
+; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64LMULMAX1-NEXT: vid.v v8
+; RV64LMULMAX1-NEXT: ret
+;
+; RV32LMULMAX2-LABEL: stepvector_v2i64:
+; RV32LMULMAX2: # %bb.0:
+; RV32LMULMAX2-NEXT: li a0, 1
+; RV32LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX2-NEXT: vmv.s.x v9, a0
+; RV32LMULMAX2-NEXT: vmv.v.i v8, 0
+; RV32LMULMAX2-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; RV32LMULMAX2-NEXT: vslideup.vi v8, v9, 2
+; RV32LMULMAX2-NEXT: ret
+;
+; RV64LMULMAX2-LABEL: stepvector_v2i64:
+; RV64LMULMAX2: # %bb.0:
+; RV64LMULMAX2-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64LMULMAX2-NEXT: vid.v v8
+; RV64LMULMAX2-NEXT: ret
+ %v = call <2 x i64> @llvm.experimental.stepvector.v2i64()
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.experimental.stepvector.v4i64()
+
+define <4 x i64> @stepvector_v4i64() {
+; RV32LMULMAX1-LABEL: stepvector_v4i64:
+; RV32LMULMAX1: # %bb.0:
+; RV32LMULMAX1-NEXT: li a0, 1
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
+; RV32LMULMAX1-NEXT: vmv.v.i v8, 0
+; RV32LMULMAX1-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; RV32LMULMAX1-NEXT: vslideup.vi v8, v9, 2
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI14_0)
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vle32.v v9, (a0)
+; RV32LMULMAX1-NEXT: ret
+;
+; RV64LMULMAX1-LABEL: stepvector_v4i64:
+; RV64LMULMAX1: # %bb.0:
+; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64LMULMAX1-NEXT: vid.v v8
+; RV64LMULMAX1-NEXT: vadd.vi v9, v8, 2
+; RV64LMULMAX1-NEXT: ret
+;
+; RV32LMULMAX2-LABEL: stepvector_v4i64:
+; RV32LMULMAX2: # %bb.0:
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI14_0)
+; RV32LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32LMULMAX2-NEXT: vle32.v v8, (a0)
+; RV32LMULMAX2-NEXT: ret
+;
+; RV64LMULMAX2-LABEL: stepvector_v4i64:
+; RV64LMULMAX2: # %bb.0:
+; RV64LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64LMULMAX2-NEXT: vid.v v8
+; RV64LMULMAX2-NEXT: ret
+ %v = call <4 x i64> @llvm.experimental.stepvector.v4i64()
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.experimental.stepvector.v8i64()
+
+define <8 x i64> @stepvector_v8i64() {
+; RV32LMULMAX1-LABEL: stepvector_v8i64:
+; RV32LMULMAX1: # %bb.0:
+; RV32LMULMAX1-NEXT: li a0, 1
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
+; RV32LMULMAX1-NEXT: vmv.v.i v8, 0
+; RV32LMULMAX1-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; RV32LMULMAX1-NEXT: vslideup.vi v8, v9, 2
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI15_0)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI15_0)
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vle32.v v9, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI15_1)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI15_1)
+; RV32LMULMAX1-NEXT: vle32.v v10, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI15_2)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI15_2)
+; RV32LMULMAX1-NEXT: vle32.v v11, (a0)
+; RV32LMULMAX1-NEXT: ret
+;
+; RV64LMULMAX1-LABEL: stepvector_v8i64:
+; RV64LMULMAX1: # %bb.0:
+; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64LMULMAX1-NEXT: vid.v v8
+; RV64LMULMAX1-NEXT: vadd.vi v9, v8, 2
+; RV64LMULMAX1-NEXT: vadd.vi v10, v8, 4
+; RV64LMULMAX1-NEXT: vadd.vi v11, v8, 6
+; RV64LMULMAX1-NEXT: ret
+;
+; RV32LMULMAX2-LABEL: stepvector_v8i64:
+; RV32LMULMAX2: # %bb.0:
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI15_0)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI15_0)
+; RV32LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32LMULMAX2-NEXT: vle32.v v8, (a0)
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI15_1)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI15_1)
+; RV32LMULMAX2-NEXT: vle32.v v10, (a0)
+; RV32LMULMAX2-NEXT: ret
+;
+; RV64LMULMAX2-LABEL: stepvector_v8i64:
+; RV64LMULMAX2: # %bb.0:
+; RV64LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64LMULMAX2-NEXT: vid.v v8
+; RV64LMULMAX2-NEXT: vadd.vi v10, v8, 4
+; RV64LMULMAX2-NEXT: ret
+ %v = call <8 x i64> @llvm.experimental.stepvector.v8i64()
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.experimental.stepvector.v16i64()
+
+define <16 x i64> @stepvector_v16i64() {
+; RV32LMULMAX1-LABEL: stepvector_v16i64:
+; RV32LMULMAX1: # %bb.0:
+; RV32LMULMAX1-NEXT: li a0, 1
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vmv.s.x v9, a0
+; RV32LMULMAX1-NEXT: vmv.v.i v8, 0
+; RV32LMULMAX1-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; RV32LMULMAX1-NEXT: vslideup.vi v8, v9, 2
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_0)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_0)
+; RV32LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32LMULMAX1-NEXT: vle32.v v9, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_1)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_1)
+; RV32LMULMAX1-NEXT: vle32.v v10, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_2)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_2)
+; RV32LMULMAX1-NEXT: vle32.v v11, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_3)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_3)
+; RV32LMULMAX1-NEXT: vle32.v v12, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_4)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_4)
+; RV32LMULMAX1-NEXT: vle32.v v13, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_5)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_5)
+; RV32LMULMAX1-NEXT: vle32.v v14, (a0)
+; RV32LMULMAX1-NEXT: lui a0, %hi(.LCPI16_6)
+; RV32LMULMAX1-NEXT: addi a0, a0, %lo(.LCPI16_6)
+; RV32LMULMAX1-NEXT: vle32.v v15, (a0)
+; RV32LMULMAX1-NEXT: ret
+;
+; RV64LMULMAX1-LABEL: stepvector_v16i64:
+; RV64LMULMAX1: # %bb.0:
+; RV64LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64LMULMAX1-NEXT: vid.v v8
+; RV64LMULMAX1-NEXT: vadd.vi v9, v8, 2
+; RV64LMULMAX1-NEXT: vadd.vi v10, v8, 4
+; RV64LMULMAX1-NEXT: vadd.vi v11, v8, 6
+; RV64LMULMAX1-NEXT: vadd.vi v12, v8, 8
+; RV64LMULMAX1-NEXT: vadd.vi v13, v8, 10
+; RV64LMULMAX1-NEXT: vadd.vi v14, v8, 12
+; RV64LMULMAX1-NEXT: vadd.vi v15, v8, 14
+; RV64LMULMAX1-NEXT: ret
+;
+; RV32LMULMAX2-LABEL: stepvector_v16i64:
+; RV32LMULMAX2: # %bb.0:
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_0)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_0)
+; RV32LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32LMULMAX2-NEXT: vle32.v v8, (a0)
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_1)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_1)
+; RV32LMULMAX2-NEXT: vle32.v v10, (a0)
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_2)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_2)
+; RV32LMULMAX2-NEXT: vle32.v v12, (a0)
+; RV32LMULMAX2-NEXT: lui a0, %hi(.LCPI16_3)
+; RV32LMULMAX2-NEXT: addi a0, a0, %lo(.LCPI16_3)
+; RV32LMULMAX2-NEXT: vle32.v v14, (a0)
+; RV32LMULMAX2-NEXT: ret
+;
+; RV64LMULMAX2-LABEL: stepvector_v16i64:
+; RV64LMULMAX2: # %bb.0:
+; RV64LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64LMULMAX2-NEXT: vid.v v8
+; RV64LMULMAX2-NEXT: vadd.vi v10, v8, 4
+; RV64LMULMAX2-NEXT: vadd.vi v12, v8, 8
+; RV64LMULMAX2-NEXT: vadd.vi v14, v8, 12
+; RV64LMULMAX2-NEXT: ret
+ %v = call <16 x i64> @llvm.experimental.stepvector.v16i64()
+ ret <16 x i64> %v
+}
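
For reference, the NOTE lines above mean the CHECK blocks in the merged file are maintained by a script, not by hand. A minimal sketch of regenerating them after an llc change, assuming an in-tree checkout and a locally built llc at build/bin/llc (that path is an assumption, not part of this patch):

  # Re-runs the four RUN lines and rewrites the CHECK/LMULMAX*/RV*LMULMAX* assertions in place.
  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll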