diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
new file mode 100644
index 0000000000000..3cb6f3c35286c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr='+v' -O3 %s -o - | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32)
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32, i32)
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32)
+
+declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32)
+
+define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: simple_vadd_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vadd.vv v9, v8, v9
+; CHECK-NEXT:    vadd.vv v9, v8, v9
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    i32 %2)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: simple_vadd_vsub_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vsub.vv v9, v8, v9
+; CHECK-NEXT:    vadd.vv v9, v8, v9
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    i32 %2)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: simple_vmul_vv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vmul.vv v9, v8, v9
+; CHECK-NEXT:    vmul.vv v9, v8, v9
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    i32 %2)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %c
+}
+
+; With passthru and masks.
+define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: vadd_vv_passthru:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vadd.vv v9, v8, v10
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    i32 %2)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: vadd_vv_passthru_negative:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vadd.vv v10, v8, v9
+; CHECK-NEXT:    vadd.vv v9, v8, v10
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    i32 %2)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
+; CHECK-LABEL: vadd_vv_mask:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vadd.vv v9, v8, v10, v0.t
+; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %m,
+    i32 %2, i32 1)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i1> %m,
+    i32 %2, i32 1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i1> %m,
+    i32 %2, i32 1)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
+; CHECK-LABEL: vadd_vv_mask_negative:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vadd.vv v9, v8, v10, v0.t
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %m,
+    i32 %2, i32 1)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i1> %m,
+    i32 %2, i32 1)
+
+  %splat = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
+  %m2 = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i1> %m2,
+    i32 %2, i32 1)
+
+  ret <vscale x 1 x i8> %c
+}
+