
Conversation

preames (Collaborator) commented Sep 8, 2025

This is coverage for an upcoming change, but I thought the choice of configurations to check was probably worth a moment of consideration as well.
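Concretely, the new test sweeps a 2x4 matrix: both XLENs against the baseline, Zbb alone, Zicond alone, and the two extensions together. Condensed from the RUN lines in the patch:

; -mtriple=riscv32 / riscv64, each run with:
;   -mattr=+m                -> RV32IM / RV64IM
;   -mattr=+m,+zbb           -> RV32IMZBB / RV64IMZBB
;   -mattr=+m,+zicond        -> RV32IMZICOND / RV64IMZICOND
;   -mattr=+m,+zicond,+zbb   -> RV32IMBOTH / RV64IMBOTH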


llvmbot commented Sep 8, 2025

@llvm/pr-subscribers-backend-risc-v

Author: Philip Reames (preames)

Changes

This is coverage for an upcoming change, but I thought the choice of configurations to check was probably worth a moment of consideration as well.


Patch is 52.28 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/157539.diff

1 File Affected:

  • (added) llvm/test/CodeGen/RISCV/select-zbb.ll (+1622)
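The check lines are autogenerated (see the NOTE line at the top of the diff below), so they would be refreshed rather than hand-edited when the upcoming change lands. A sketch of that workflow, assuming an llvm-project checkout with llc already built; the build directory path is a placeholder, not part of the patch:

; From the llvm-project root:
;   llvm/utils/update_llc_test_checks.py \
;     --llc-binary=build/bin/llc \
;     llvm/test/CodeGen/RISCV/select-zbb.ll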
diff --git a/llvm/test/CodeGen/RISCV/select-zbb.ll b/llvm/test/CodeGen/RISCV/select-zbb.ll
new file mode 100644
index 0000000000000..13e637909b43b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/select-zbb.ll
@@ -0,0 +1,1622 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IM %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IM %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZBB %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZBB %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMZICOND %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV32IMBOTH %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond,+zbb -verify-machineinstrs < %s | FileCheck --check-prefixes=RV64IMBOTH %s
+
+
+define i32 @select_umin_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bgeu a1, a2, .LBB0_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB0_4
+; RV32IM-NEXT:  .LBB0_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB0_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB0_2
+; RV32IM-NEXT:  .LBB0_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    bgeu a1, a3, .LBB0_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB0_4
+; RV64IM-NEXT:  .LBB0_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB0_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB0_2
+; RV64IM-NEXT:  .LBB0_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB0_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    minu a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB0_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB0_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    minu a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB0_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltu a3, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sltu a4, a1, a3
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    minu a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    minu a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_umin_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umin_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a1, a3, .LBB1_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB1_4
+; RV32IM-NEXT:  .LBB1_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB1_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB1_2
+; RV32IM-NEXT:  .LBB1_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a2, a3, .LBB1_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB1_4
+; RV64IM-NEXT:  .LBB1_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB1_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB1_2
+; RV64IM-NEXT:  .LBB1_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB1_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    minu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB1_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB1_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    minu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB1_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    minu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_2:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    minu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umin_3(i1 zeroext %cond, i32 %a) {
+; RV32IM-LABEL: select_umin_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a1, a3, .LBB2_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB2_4
+; RV32IM-NEXT:  .LBB2_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB2_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB2_2
+; RV32IM-NEXT:  .LBB2_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_3:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a2, a3, .LBB2_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB2_4
+; RV64IM-NEXT:  .LBB2_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB2_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB2_2
+; RV64IM-NEXT:  .LBB2_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_3:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB2_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    minu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB2_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_3:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB2_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    minu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB2_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 32
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.eqz a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 32
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.eqz a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_3:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    minu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:    or a0, a1, a0
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_3:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a2, a1
+; RV64IMBOTH-NEXT:    li a3, 32
+; RV64IMBOTH-NEXT:    minu a2, a2, a3
+; RV64IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV64IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV64IMBOTH-NEXT:    or a0, a1, a0
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umin(i32 %a, i32 32)
+  %res = select i1 %cond, i32 %a, i32 %c
+  ret i32 %res
+}
+
+define i32 @select_umin_4(i1 zeroext %cond, i32 %x) {
+; RV32IM-LABEL: select_umin_4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 128
+; RV32IM-NEXT:    bgeu a1, a2, .LBB3_3
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    beqz a0, .LBB3_4
+; RV32IM-NEXT:  .LBB3_2:
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB3_3:
+; RV32IM-NEXT:    li a1, 128
+; RV32IM-NEXT:    bnez a0, .LBB3_2
+; RV32IM-NEXT:  .LBB3_4:
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umin_4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a1, 128
+; RV64IM-NEXT:    bgeu a2, a1, .LBB3_3
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    beqz a0, .LBB3_4
+; RV64IM-NEXT:  .LBB3_2:
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB3_3:
+; RV64IM-NEXT:    li a2, 128
+; RV64IM-NEXT:    bnez a0, .LBB3_2
+; RV64IM-NEXT:  .LBB3_4:
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umin_4:
+; RV32IMZBB:       # %bb.0:
+; RV32IMZBB-NEXT:    mv a2, a0
+; RV32IMZBB-NEXT:    li a0, 128
+; RV32IMZBB-NEXT:    bnez a2, .LBB3_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    minu a0, a1, a0
+; RV32IMZBB-NEXT:  .LBB3_2:
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umin_4:
+; RV64IMZBB:       # %bb.0:
+; RV64IMZBB-NEXT:    mv a2, a0
+; RV64IMZBB-NEXT:    li a0, 128
+; RV64IMZBB-NEXT:    bnez a2, .LBB3_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    minu a0, a1, a0
+; RV64IMZBB-NEXT:  .LBB3_2:
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umin_4:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 128
+; RV32IMZICOND-NEXT:    addi a1, a1, -128
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT:    addi a0, a0, 128
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umin_4:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sltiu a2, a1, 128
+; RV64IMZICOND-NEXT:    addi a1, a1, -128
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT:    addi a0, a0, 128
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umin_4:
+; RV32IMBOTH:       # %bb.0:
+; RV32IMBOTH-NEXT:    li a2, 128
+; RV32IMBOTH-NEXT:    minu a1, a1, a2
+; RV32IMBOTH-NEXT:    addi a1, a1, -128
+; RV32IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV32IMBOTH-NEXT:    addi a0, a0, 128
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umin_4:
+; RV64IMBOTH:       # %bb.0:
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    li a2, 128
+; RV64IMBOTH-NEXT:    minu a1, a1, a2
+; RV64IMBOTH-NEXT:    addi a1, a1, -128
+; RV64IMBOTH-NEXT:    czero.nez a0, a1, a0
+; RV64IMBOTH-NEXT:    addi a0, a0, 128
+; RV64IMBOTH-NEXT:    ret
+  %add = call i32 @llvm.umin(i32 %x, i32 128)
+  %sel = select i1 %cond, i32 128, i32 %add
+  ret i32 %sel
+}
+
+define i32 @select_umax_1(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umax_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bgeu a2, a1, .LBB4_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB4_4
+; RV32IM-NEXT:  .LBB4_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB4_3: # %entry
+; RV32IM-NEXT:    mv a1, a2
+; RV32IM-NEXT:    bnez a0, .LBB4_2
+; RV32IM-NEXT:  .LBB4_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umax_1:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a1, a1
+; RV64IM-NEXT:    sext.w a3, a2
+; RV64IM-NEXT:    bgeu a3, a1, .LBB4_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB4_4
+; RV64IM-NEXT:  .LBB4_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB4_3: # %entry
+; RV64IM-NEXT:    mv a1, a3
+; RV64IM-NEXT:    bnez a0, .LBB4_2
+; RV64IM-NEXT:  .LBB4_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umax_1:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    beqz a0, .LBB4_2
+; RV32IMZBB-NEXT:  # %bb.1:
+; RV32IMZBB-NEXT:    maxu a2, a1, a2
+; RV32IMZBB-NEXT:  .LBB4_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a2
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umax_1:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    beqz a0, .LBB4_2
+; RV64IMZBB-NEXT:  # %bb.1:
+; RV64IMZBB-NEXT:    sext.w a2, a2
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    maxu a2, a1, a2
+; RV64IMZBB-NEXT:  .LBB4_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a2
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umax_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltu a3, a2, a1
+; RV32IMZICOND-NEXT:    czero.nez a4, a2, a3
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a3
+; RV32IMZICOND-NEXT:    or a1, a1, a4
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umax_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a1, a1
+; RV64IMZICOND-NEXT:    sext.w a3, a2
+; RV64IMZICOND-NEXT:    sltu a4, a3, a1
+; RV64IMZICOND-NEXT:    czero.nez a3, a3, a4
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a4
+; RV64IMZICOND-NEXT:    or a1, a1, a3
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umax_1:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    maxu a1, a1, a2
+; RV32IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV32IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV32IMBOTH-NEXT:    or a0, a0, a2
+; RV32IMBOTH-NEXT:    ret
+;
+; RV64IMBOTH-LABEL: select_umax_1:
+; RV64IMBOTH:       # %bb.0: # %entry
+; RV64IMBOTH-NEXT:    sext.w a3, a2
+; RV64IMBOTH-NEXT:    sext.w a1, a1
+; RV64IMBOTH-NEXT:    maxu a1, a1, a3
+; RV64IMBOTH-NEXT:    czero.nez a2, a2, a0
+; RV64IMBOTH-NEXT:    czero.eqz a0, a1, a0
+; RV64IMBOTH-NEXT:    or a0, a0, a2
+; RV64IMBOTH-NEXT:    ret
+entry:
+  %c = call i32 @llvm.umax(i32 %a, i32 %b)
+  %res = select i1 %cond, i32 %c, i32 %b
+  ret i32 %res
+}
+
+define i32 @select_umax_2(i1 zeroext %cond, i32 %a, i32 %b) {
+; RV32IM-LABEL: select_umax_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    li a3, 32
+; RV32IM-NEXT:    mv a2, a1
+; RV32IM-NEXT:    bgeu a3, a1, .LBB5_3
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB5_4
+; RV32IM-NEXT:  .LBB5_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
+; RV32IM-NEXT:  .LBB5_3: # %entry
+; RV32IM-NEXT:    li a2, 32
+; RV32IM-NEXT:    bnez a0, .LBB5_2
+; RV32IM-NEXT:  .LBB5_4: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_umax_2:
+; RV64IM:       # %bb.0: # %entry
+; RV64IM-NEXT:    sext.w a2, a1
+; RV64IM-NEXT:    li a3, 32
+; RV64IM-NEXT:    bgeu a3, a2, .LBB5_3
+; RV64IM-NEXT:  # %bb.1: # %entry
+; RV64IM-NEXT:    beqz a0, .LBB5_4
+; RV64IM-NEXT:  .LBB5_2: # %entry
+; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    ret
+; RV64IM-NEXT:  .LBB5_3: # %entry
+; RV64IM-NEXT:    li a2, 32
+; RV64IM-NEXT:    bnez a0, .LBB5_2
+; RV64IM-NEXT:  .LBB5_4: # %entry
+; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    ret
+;
+; RV32IMZBB-LABEL: select_umax_2:
+; RV32IMZBB:       # %bb.0: # %entry
+; RV32IMZBB-NEXT:    bnez a0, .LBB5_2
+; RV32IMZBB-NEXT:  # %bb.1: # %entry
+; RV32IMZBB-NEXT:    li a0, 32
+; RV32IMZBB-NEXT:    maxu a1, a1, a0
+; RV32IMZBB-NEXT:  .LBB5_2: # %entry
+; RV32IMZBB-NEXT:    mv a0, a1
+; RV32IMZBB-NEXT:    ret
+;
+; RV64IMZBB-LABEL: select_umax_2:
+; RV64IMZBB:       # %bb.0: # %entry
+; RV64IMZBB-NEXT:    bnez a0, .LBB5_2
+; RV64IMZBB-NEXT:  # %bb.1: # %entry
+; RV64IMZBB-NEXT:    sext.w a1, a1
+; RV64IMZBB-NEXT:    li a0, 32
+; RV64IMZBB-NEXT:    maxu a1, a1, a0
+; RV64IMZBB-NEXT:  .LBB5_2: # %entry
+; RV64IMZBB-NEXT:    mv a0, a1
+; RV64IMZBB-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_umax_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sltiu a2, a1, 33
+; RV32IMZICOND-NEXT:    addi a3, a1, -32
+; RV32IMZICOND-NEXT:    czero.nez a2, a3, a2
+; RV32IMZICOND-NEXT:    addi a2, a2, 32
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_umax_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sext.w a2, a1
+; RV64IMZICOND-NEXT:    sltiu a3, a2, 33
+; RV64IMZICOND-NEXT:    addi a2, a2, -32
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a3
+; RV64IMZICOND-NEXT:    addi a2, a2, 32
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
+;
+; RV32IMBOTH-LABEL: select_umax_2:
+; RV32IMBOTH:       # %bb.0: # %entry
+; RV32IMBOTH-NEXT:    li a2, 32
+; RV32IMBOTH-NEXT:    maxu a2, a1, a2
+; RV32IMBOTH-NEXT:    czero.eqz a1, a1, a0
+; RV32IMBOTH-NEXT:    czero.nez a0, a2, a0
+; RV32IMBOTH-NEXT:   ...
[truncated]
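For readers less familiar with Zicond, which dominates the checks above: czero.eqz/czero.nez conditionally zero a register, so a select lowers to two masked operands OR'd together instead of a branch (the Zbb-only configurations, by contrast, still branch around the minu/maxu). A minimal sketch; the function name is illustrative, and the commented asm mirrors the RV32IMZICOND output above:

; res = select cond, x, y becomes, branchlessly:
;   czero.eqz tx, x, cond   ; tx = (cond != 0) ? x : 0
;   czero.nez ty, y, cond   ; ty = (cond == 0) ? y : 0
;   or        res, tx, ty   ; one side is zero, so the OR picks the other
define i32 @zicond_select_shape(i1 zeroext %cond, i32 %x, i32 %y) {
entry:
  %res = select i1 %cond, i32 %x, i32 %y
  ret i32 %res
}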


topperc (Collaborator) left a comment


LGTM

preames merged commit 6e5d008 into llvm:main on Sep 9, 2025
9 checks passed
preames deleted the pr-riscv-select-minmax-tests branch on Sep 9, 2025, 21:56