diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 10a3889a8b9ecf..a097df996ad2f4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -362,9 +362,15 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
   }
 
-  for (auto VT : MVT::integer_scalable_vector_valuetypes())
+  for (auto VT : MVT::integer_scalable_vector_valuetypes()) {
     setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
+    setOperationAction(ISD::SMIN, VT, Legal);
+    setOperationAction(ISD::SMAX, VT, Legal);
+    setOperationAction(ISD::UMIN, VT, Legal);
+    setOperationAction(ISD::UMAX, VT, Legal);
+  }
+
   // We must custom-lower SPLAT_VECTOR vXi64 on RV32
   if (!Subtarget.is64Bit())
     setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 61974c0a2ea9d7..a646cd49da3a21 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -164,6 +164,12 @@ defm "" : VPatBinarySDNode_VV_VX_VI;
 defm "" : VPatBinarySDNode_VV_VX_VI;
 defm "" : VPatBinarySDNode_VV_VX_VI;
 
+// 12.9. Vector Integer Min/Max Instructions
+defm "" : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
+defm "" : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
+defm "" : VPatBinarySDNode_VV_VX<umax, "PseudoVMAXU">;
+defm "" : VPatBinarySDNode_VV_VX<smax, "PseudoVMAX">;
+
 } // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode-rv32.ll
new file mode 100644
index 00000000000000..cdff9d061a0f62
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode-rv32.ll
@@ -0,0 +1,871 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vmax_vv_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmax.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt <vscale x 1 x i8> %va, %vb
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmax_vx_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmax.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 1 x i8> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vmax_vi_nxv1i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmax.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> undef, i8 -3, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 1 x i8> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
+  ret <vscale x 1 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmax_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vmax_vv_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmax.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt <vscale x 2 x i8> %va, %vb
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
+  ret <vscale x 2 x i8> %vc
+}
+
+define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmax_vx_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmax.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> undef, i8 
%b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i16( %va, i16 
signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i16_0( %va) { +; CHECK-LABEL: 
vmax_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i32( %va, %vb) { +; 
CHECK-LABEL: vmax_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; 
CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vmax.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vmax.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vmax.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, 
(a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode-rv64.ll new file mode 100644 index 00000000000000..b3a45bb981b487 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode-rv64.ll @@ -0,0 +1,843 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmax_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i8: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i16_0: +; CHECK: # %bb.0: +; 
CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i16: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head 
= insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: 
ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmax.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmax.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp sgt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmax.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp sgt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode-rv32.ll new file mode 100644 index 00000000000000..2c35d8fcfda842 --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode-rv32.ll @@ -0,0 +1,871 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmax_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + 
%cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + 
%vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc 
= select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: 
vmax_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, 
zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vmax_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vmaxu.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vmaxu.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i64_0( 
%va) { +; CHECK-LABEL: vmax_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vmaxu.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode-rv64.ll new file mode 100644 index 00000000000000..400ad872f09f52 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode-rv64.ll @@ -0,0 +1,843 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmax_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, 
v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select 
%cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmax_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmax_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select 
%cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i16: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmax_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmax_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i32_0: +; CHECK: # %bb.0: 
+; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv16i32: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vmax_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmax_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmaxu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement 
undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmax_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmaxu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ugt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmax_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmax_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmax_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmax_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmaxu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ugt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode-rv32.ll new file mode 100644 index 00000000000000..0c3cb5a84c4b72 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode-rv32.ll @@ -0,0 +1,871 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmin_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret 
+ %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + 
+define @vmin_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i16_0( %va) 
{ +; CHECK-LABEL: vmin_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i32( 
%va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, 
a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vmin.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vmin.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vmin.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v 
v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode-rv64.ll new file mode 100644 index 00000000000000..f9d629d20beab9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode-rv64.ll @@ -0,0 +1,843 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmin_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv32i8: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i16_0: +; CHECK: # %bb.0: +; 
CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv32i16: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i32( %va, i32 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i32( %va, i32 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i32( %va, i32 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head 
= insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i32( %va, i32 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i32( %va, i32 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: 
ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vmin.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmin.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp slt %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmin.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp slt %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode-rv32.ll new file mode 100644 index 00000000000000..bef5a496c9b75f --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode-rv32.ll @@ -0,0 +1,871 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmin_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + 
%cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv32i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv32i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv32i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv32i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv64i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: vminu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv64i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + 
%vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv64i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv64i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc 
= select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv32i16( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vminu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv32i16( %va, i16 signext %b) { +; CHECK-LABEL: vmin_vx_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv32i16_0( %va) { +; CHECK-LABEL: vmin_vi_nxv32i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i32( %va, i32 %b) { +; CHECK-LABEL: 
vmin_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, 
zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv16i32( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vminu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv16i32( %va, i32 %b) { +; CHECK-LABEL: vmin_vx_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv16i32_0( %va) { +; CHECK-LABEL: vmin_vi_nxv16i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv1i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vminu.vv v16, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v18 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vminu.vv v16, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i64_0( 
%va) { +; CHECK-LABEL: vmin_vi_nxv2i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v20 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vsrl.vx v8, v8, a1 +; CHECK-NEXT: vor.vv v28, v8, v28 +; CHECK-NEXT: vminu.vv v16, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vminu.vv v16, v16, v8 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: vmin_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vminu.vv v16, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv8i64_0( %va) { +; CHECK-LABEL: vmin_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode-rv64.ll new file mode 100644 index 00000000000000..8a136f2fd5f97b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode-rv64.ll @@ -0,0 +1,843 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @vmin_vv_nxv1i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf8,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, 
v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv1i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv1i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv1i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv2i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv2i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv2i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv2i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv4i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,mf2,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv4i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vi_nxv4i8_0( %va) { +; CHECK-LABEL: vmin_vi_nxv4i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -3 +; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -3, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select %cmp, %va, %splat + ret %vc +} + +define @vmin_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: vmin_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vminu.vv v16, v16, v17 +; CHECK-NEXT: ret + %cmp = icmp ult %va, %vb + %vc = select %cmp, %va, %vb + ret %vc +} + +define @vmin_vx_nxv8i8( %va, i8 signext %b) { +; CHECK-LABEL: vmin_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vminu.vx v16, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %cmp = icmp ult %va, %splat + %vc = select 
<vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vmin_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -3, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i8> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
+  ret <vscale x 8 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmin_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vmin_vv_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 16 x i8> %va, %vb
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i8> %va, %splat
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vmin_vi_nxv16i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i8> undef, i8 -3, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i8> %va, %splat
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmin_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vmin_vv_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 32 x i8> %va, %vb
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 32 x i8> %va, %splat
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vmin_vi_nxv32i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i8> undef, i8 -3, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 32 x i8> %va, %splat
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
+  ret <vscale x 32 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmin_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vmin_vv_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vminu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 64 x i8> %va, %vb
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 64 x i8> %va, %splat
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
+; CHECK-LABEL: vmin_vi_nxv64i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 64 x i8> undef, i8 -3, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 64 x i8> %va, %splat
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
+  ret <vscale x 64 x i8> %vc
+}
+
+define <vscale x 1 x i16> @vmin_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vmin_vv_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 1 x i16> %va, %vb
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 1 x i16> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vmin_vi_nxv1i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> undef, i16 -3, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 1 x i16> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
+  ret <vscale x 1 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmin_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vmin_vv_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 2 x i16> %va, %vb
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i16> %va, %splat
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vmin_vi_nxv2i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> undef, i16 -3, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i16> %va, %splat
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
+  ret <vscale x 2 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmin_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vmin_vv_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 4 x i16> %va, %vb
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i16> %va, %splat
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vmin_vi_nxv4i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> undef, i16 -3, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i16> %va, %splat
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
+  ret <vscale x 4 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmin_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vmin_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 8 x i16> %va, %vb
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vmin_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -3, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmin_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vmin_vv_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 16 x i16> %va, %vb
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i16> %va, %splat
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vmin_vi_nxv16i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i16> undef, i16 -3, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i16> %va, %splat
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
+  ret <vscale x 16 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmin_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
+; CHECK-LABEL: vmin_vv_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vminu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 32 x i16> %va, %vb
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 32 x i16> %va, %splat
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vmin_vi_nxv32i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x i16> undef, i16 -3, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 32 x i16> %va, %splat
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
+  ret <vscale x 32 x i16> %vc
+}
+
+define <vscale x 1 x i32> @vmin_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vmin_vv_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 1 x i32> %va, %vb
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 1 x i32> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vmin_vi_nxv1i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 -3, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 1 x i32> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
+  ret <vscale x 1 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vmin_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vmin_vv_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 2 x i32> %va, %vb
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i32> %va, %splat
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vmin_vi_nxv2i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 -3, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i32> %va, %splat
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
+  ret <vscale x 2 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vmin_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vmin_vv_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 4 x i32> %va, %vb
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i32> %va, %splat
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vmin_vi_nxv4i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 -3, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i32> %va, %splat
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vmin_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vmin_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 8 x i32> %va, %vb
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vmin_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -3, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vmin_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
+; CHECK-LABEL: vmin_vv_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vminu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 16 x i32> %va, %vb
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
+; CHECK-LABEL: vmin_vx_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i32> %va, %splat
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vmin_vi_nxv16i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i32> undef, i32 -3, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i32> %va, %splat
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
+  ret <vscale x 16 x i32> %vc
+}
+
+define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
+; CHECK-LABEL: vmin_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v17
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 1 x i64> %va, %vb
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmin_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 1 x i64> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vmin_vi_nxv1i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 -3, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 1 x i64> %va, %splat
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
+  ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
+; CHECK-LABEL: vmin_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v18
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 2 x i64> %va, %vb
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmin_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i64> %va, %splat
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vmin_vi_nxv2i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 -3, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i64> %va, %splat
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
+; CHECK-LABEL: vmin_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vminu.vv v16, v16, v20
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 4 x i64> %va, %vb
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmin_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i64> %va, %splat
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vmin_vi_nxv4i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 -3, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i64> %va, %splat
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
+  ret <vscale x 4 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: vmin_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vminu.vv v16, v16, v8
+; CHECK-NEXT:    ret
+  %cmp = icmp ult <vscale x 8 x i64> %va, %vb
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vmin_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
+  ret <vscale x 8 x i64> %vc
+}
+
+define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vmin_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vminu.vx v16, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -3, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
+  ret <vscale x 8 x i64> %vc
+}