diff --git a/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
new file mode 100644
index 0000000000000..e050240f0de11
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
@@ -0,0 +1,146 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+; Check that we perform binary arithmetic in a narrower type where possible, via
+; combineBinOpOfZExt or otherwise.
+
+define <vscale x 8 x i32> @add(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwaddu.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %add = add <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %add
+}
+
+define <vscale x 8 x i32> @sub(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sub:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwsubu.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %sub
+}
+
+define <vscale x 8 x i32> @mul(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: mul:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwmulu.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %mul = mul <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %mul
+}
+
+define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sdiv:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vdivu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %sdiv = sdiv <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %sdiv
+}
+
+define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: udiv:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vdivu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %udiv = udiv <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %udiv
+}
+
+define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: srem:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vremu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %srem = srem <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %srem
+}
+
+define <vscale x 8 x i32> @urem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: urem:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vremu.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %urem = urem <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %urem
+}
+
+define <vscale x 8 x i32> @and(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: and:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %and = and <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %and
+}
+
+define <vscale x 8 x i32> @or(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: or:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vor.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %or = or <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %or
+}
+
+define <vscale x 8 x i32> @xor(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: xor:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vxor.vv v12, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v8, v12
+; CHECK-NEXT:    ret
+  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+  %xor = xor <vscale x 8 x i32> %a.zext, %b.zext
+  ret <vscale x 8 x i32> %xor
+}
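
For context (not part of the patch): a minimal, hand-written sketch of the shape of transform these tests exercise. combineBinOpOfZExt is a SelectionDAG combine, so it does not literally rewrite IR; the "after" form below is an IR-level approximation of the add case, performing the arithmetic one step wider than the sources and applying a single extend afterwards, which matches the vwaddu.vv + vzext.vf2 sequence checked above. The intermediate <vscale x 8 x i16> type and the value names are assumptions for illustration only.

; Before: both operands are zero-extended to i32, so the add runs at e32.
  %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
  %wide.add = add <vscale x 8 x i32> %a.zext, %b.zext

; After (conceptually): a widening unsigned add at e16 (vwaddu.vv), then one zext to i32 (vzext.vf2).
  %a.ext16 = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
  %b.ext16 = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %narrow.add = add <vscale x 8 x i16> %a.ext16, %b.ext16
  %wide.result = zext <vscale x 8 x i16> %narrow.add to <vscale x 8 x i32>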