diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 89871278296fc..35242c605d5f5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -182,6 +182,8 @@ let Predicates = [HasStdExtZvksh], RVVConstraint = NoConstraint in {
 // Pseudo instructions
 //===----------------------------------------------------------------------===//
 
+defm PseudoVANDN : VPseudoVALU_VV_VX;
+
 multiclass VPseudoUnaryV_V {
   foreach m = MxList in {
     let VLMul = m.value in {
@@ -215,6 +217,34 @@ multiclass VPatUnarySDNode_V {
   }
 }
 
+// Helpers for detecting splats since we preprocess splat_vector to vmv.v.x
+// This should match the logic in RISCVDAGToDAGISel::selectVSplat
+def riscv_splat_vector : PatFrag<(ops node:$rs1),
+                                 (riscv_vmv_v_x_vl undef, node:$rs1, srcvalue)>;
+def riscv_vnot : PatFrag<(ops node:$rs1), (xor node:$rs1,
+                                               (riscv_splat_vector -1))>;
+
+foreach vti = AllIntegerVectors in {
+  let Predicates = !listconcat([HasStdExtZvbb],
+                               GetVTypePredicates<vti>.Predicates) in {
+    def : Pat<(vti.Vector (and (riscv_vnot vti.RegClass:$rs1),
+                                vti.RegClass:$rs2)),
+              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX)
+                 (vti.Vector (IMPLICIT_DEF)),
+                 vti.RegClass:$rs2,
+                 vti.RegClass:$rs1,
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
+    def : Pat<(vti.Vector (and (riscv_splat_vector
+                                 (not vti.ScalarRegClass:$rs1)),
+                                vti.RegClass:$rs2)),
+              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX)
+                 (vti.Vector (IMPLICIT_DEF)),
+                 vti.RegClass:$rs2,
+                 vti.ScalarRegClass:$rs1,
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
+  }
+}
+
 defm : VPatUnarySDNode_V;
 defm : VPatUnarySDNode_V;
 defm : VPatUnarySDNode_V;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
new file mode 100644
index 0000000000000..adcd676d3e058
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -0,0 +1,2006 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+
+define <vscale x 1 x i8> @vandn_vv_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
+; CHECK-LABEL: vandn_vv_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8
+; CHECK-NEXT:    vand.vv v8, v8, v9
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %a = xor <vscale x 1 x i8> %x, %splat
+  %b = and <vscale x 1 x i8> %a, %y
+  ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 1 x i8> @vandn_vv_swapped_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
+; CHECK-LABEL: vandn_vv_swapped_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8
+; CHECK-NEXT:    vand.vv v8, v9, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+  %splat = shufflevector
%head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv1i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv1i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv1i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv2i8( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv2i8( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv2i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv2i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv2i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv4i8( %x, %y) { +; 
CHECK-LABEL: vandn_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv4i8( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv4i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv4i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv4i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv8i8( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv8i8( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv8i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-NEXT: 
vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv8i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv8i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv16i8( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv16i8( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v10, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv16i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv16i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv16i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv32i8( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: 
vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv32i8( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v12, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv32i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv32i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv32i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv64i8( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv64i8( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v16, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i8 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv64i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + 
%head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv64i8(i8 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv64i8: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i8 %x, -1 + %head = insertelement poison, i8 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv1i16( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv1i16( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv1i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv1i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv1i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv2i16( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, 
%splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv2i16( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv2i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv2i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv2i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv4i16( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv4i16( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv4i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv4i16(i16 %x, %y) { +; 
CHECK-LABEL: vandn_vx_swapped_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv4i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv8i16( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv8i16( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v10, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv8i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv8i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv8i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv16i16( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv16i16( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, 
m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v12, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv16i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv16i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv16i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv32i16( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv32i16( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v16, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i16 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv32i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv32i16(i16 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; 
CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv32i16: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i16 %x, -1 + %head = insertelement poison, i16 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv1i32( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv1i32( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv1i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv1i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv1i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, mf2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv2i32( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv2i32( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; 
CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv2i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv2i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv2i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv4i32( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv4i32( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v10, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv4i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv4i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv4i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; 
CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv8i32( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv8i32( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v12, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv8i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv8i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv8i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv16i32( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv16i32( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v16, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i32 -1, i32 0 + %splat = 
shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv16i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv16i32(i32 %x, %y) { +; CHECK-LABEL: vandn_vx_swapped_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: not a0, a0 +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vx_swapped_nxv16i32: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB-NEXT: ret + %a = xor i32 %x, -1 + %head = insertelement poison, i32 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv1i64( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv1i64( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v9, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv1i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v9, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv1i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_nxv1i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 +; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v9, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_nxv1i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_nxv1i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; 
CHECK-ZVBB32-NEXT: vlse64.v v9, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v9, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_nxv1i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv1i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_swapped_nxv1i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 +; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v9, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_swapped_nxv1i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_swapped_nxv1i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v9, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v9, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_swapped_nxv1i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv2i64( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv2i64( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v10, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv2i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v10, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv2i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_nxv2i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 
+; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v10, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_nxv2i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_nxv2i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v10, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v10, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_nxv2i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv2i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_swapped_nxv2i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 +; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v10, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_swapped_nxv2i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_swapped_nxv2i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v10, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v10, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_swapped_nxv2i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv4i64( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = 
insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv4i64( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v12, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv4i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v12, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv4i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_nxv4i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 +; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v12, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_nxv4i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_nxv4i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v12, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v12, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_nxv4i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv4i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_swapped_nxv4i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 +; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v12, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_swapped_nxv4i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_swapped_nxv4i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m4, 
ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v12, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v12, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_swapped_nxv4i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vv_nxv8i64( %x, %y) { +; CHECK-LABEL: vandn_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %a, %y + ret %b +} + +define @vandn_vv_swapped_nxv8i64( %x, %y) { +; CHECK-LABEL: vandn_vv_swapped_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v16, v8 +; CHECK-NEXT: ret +; +; CHECK-ZVBB-LABEL: vandn_vv_swapped_nxv8i64: +; CHECK-ZVBB: # %bb.0: +; CHECK-ZVBB-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-ZVBB-NEXT: vandn.vv v8, v16, v8 +; CHECK-ZVBB-NEXT: ret + %head = insertelement poison, i64 -1, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %a = xor %x, %splat + %b = and %y, %a + ret %b +} + +define @vandn_vx_nxv8i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_nxv8i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 +; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-RV32-NEXT: vlse64.v v16, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v16, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_nxv8i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_nxv8i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v16, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v16, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_nxv8i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} + +define @vandn_vx_swapped_nxv8i64(i64 %x, %y) { +; CHECK-RV32-LABEL: vandn_vx_swapped_nxv8i64: +; CHECK-RV32: # %bb.0: +; CHECK-RV32-NEXT: addi sp, sp, -16 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-RV32-NEXT: not a0, a0 
+; CHECK-RV32-NEXT: not a1, a1 +; CHECK-RV32-NEXT: sw a1, 12(sp) +; CHECK-RV32-NEXT: sw a0, 8(sp) +; CHECK-RV32-NEXT: addi a0, sp, 8 +; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-RV32-NEXT: vlse64.v v16, (a0), zero +; CHECK-RV32-NEXT: vand.vv v8, v16, v8 +; CHECK-RV32-NEXT: addi sp, sp, 16 +; CHECK-RV32-NEXT: ret +; +; CHECK-RV64-LABEL: vandn_vx_swapped_nxv8i64: +; CHECK-RV64: # %bb.0: +; CHECK-RV64-NEXT: not a0, a0 +; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-RV64-NEXT: vand.vx v8, v8, a0 +; CHECK-RV64-NEXT: ret +; +; CHECK-ZVBB32-LABEL: vandn_vx_swapped_nxv8i64: +; CHECK-ZVBB32: # %bb.0: +; CHECK-ZVBB32-NEXT: addi sp, sp, -16 +; CHECK-ZVBB32-NEXT: .cfi_def_cfa_offset 16 +; CHECK-ZVBB32-NEXT: not a0, a0 +; CHECK-ZVBB32-NEXT: not a1, a1 +; CHECK-ZVBB32-NEXT: sw a1, 12(sp) +; CHECK-ZVBB32-NEXT: sw a0, 8(sp) +; CHECK-ZVBB32-NEXT: addi a0, sp, 8 +; CHECK-ZVBB32-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-ZVBB32-NEXT: vlse64.v v16, (a0), zero +; CHECK-ZVBB32-NEXT: vand.vv v8, v16, v8 +; CHECK-ZVBB32-NEXT: addi sp, sp, 16 +; CHECK-ZVBB32-NEXT: ret +; +; CHECK-ZVBB64-LABEL: vandn_vx_swapped_nxv8i64: +; CHECK-ZVBB64: # %bb.0: +; CHECK-ZVBB64-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-ZVBB64-NEXT: vandn.vx v8, v8, a0 +; CHECK-ZVBB64-NEXT: ret + %a = xor i64 %x, -1 + %head = insertelement poison, i64 %a, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %b = and %splat, %y + ret %b +} +
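
Note (reviewer aid, not part of the patch): the shape the new TableGen patterns match can be exercised standalone with the same llc invocations as the RUN lines above. The file and function names below are illustrative only. With -mattr=+v,+experimental-zvbb this is expected to select vandn.vv, while plain -mattr=+v keeps the vnot.v + vand.vv sequence, mirroring the CHECK/CHECK-ZVBB pairs in the test file.

;   llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < andn-repro.ll
define <vscale x 1 x i8> @andn_repro(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
  ; Splat of -1 followed by xor is the riscv_vnot form the new VV pattern looks for.
  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %notx = xor <vscale x 1 x i8> %x, %splat
  ; and(not %x, %y) should select PseudoVANDN_VV (vandn.vv) when Zvbb is enabled.
  %r = and <vscale x 1 x i8> %notx, %y
  ret <vscale x 1 x i8> %r
}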