[AMDGPU][GlobalISel] Better legalization of 32-bit ctlz/cttz
Differential Revision: https://reviews.llvm.org/D107474
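In brief: lower G_CTLZ/G_CTTZ to the AMDGPU ffbh/ffbl opcodes, which return -1 for a zero input, and fix up with a single unsigned min against the bit width, instead of the generic lowering to the zero-undef variants guarded by a compare and select. A minimal C++ model of the identity this relies on, with ffbh32/ffbl32 as hypothetical stand-ins for v_ffbh_u32/v_ffbl_b32 (not LLVM code):

#include <algorithm>
#include <cstdint>

// Hardware-style semantics: -1 (all ones) on a zero input.
uint32_t ffbh32(uint32_t x) { return x ? __builtin_clz(x) : UINT32_MAX; }
uint32_t ffbl32(uint32_t x) { return x ? __builtin_ctz(x) : UINT32_MAX; }

// IR semantics: ctlz(0) == cttz(0) == 32. One unsigned min clamps the
// all-ones zero-input result down to the bit width.
uint32_t ctlz32(uint32_t x) { return std::min(ffbh32(x), 32u); }
uint32_t cttz32(uint32_t x) { return std::min(ffbl32(x), 32u); }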
jayfoad committed Aug 6, 2021
1 parent 24b67a9 commit 83610d4
Showing 13 changed files with 319 additions and 349 deletions.
26 changes: 25 additions & 1 deletion llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -940,7 +940,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.clampScalar(1, S32, S64)
.widenScalarToNextPow2(0, 32)
.widenScalarToNextPow2(1, 32)
.lower();
.custom();

// The 64-bit versions produce 32-bit results, but only on the SALU.
getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF})
@@ -1758,6 +1758,9 @@ bool AMDGPULegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
return legalizeFFloor(MI, MRI, B);
case TargetOpcode::G_BUILD_VECTOR:
return legalizeBuildVector(MI, MRI, B);
case TargetOpcode::G_CTLZ:
case TargetOpcode::G_CTTZ:
return legalizeCTLZ_CTTZ(MI, MRI, B);
default:
return false;
}
@@ -2779,6 +2782,27 @@ bool AMDGPULegalizerInfo::legalizeBuildVector(
return true;
}

// Legalize ctlz/cttz to ffbh/ffbl instead of the default legalization to
// ctlz/cttz_zero_undef. This allows us to fix up the result for the zero input
// case with a single min instruction instead of a compare+select.
bool AMDGPULegalizerInfo::legalizeCTLZ_CTTZ(MachineInstr &MI,
MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);

unsigned NewOpc = MI.getOpcode() == AMDGPU::G_CTLZ
? AMDGPU::G_AMDGPU_FFBH_U32
: AMDGPU::G_AMDGPU_FFBL_B32;
auto Tmp = B.buildInstr(NewOpc, {DstTy}, {Src});
B.buildUMin(Dst, Tmp, B.buildConstant(DstTy, SrcTy.getSizeInBits()));

MI.eraseFromParent();
return true;
}
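The clamp constant is SrcTy.getSizeInBits(), so this composes with the generic widening for narrower types: a 16-bit ctlz is zero-extended to 32 bits, clamped at 32, then adjusted by subtracting the 16 extra leading zeros, as the ctlz_s16_s32 and ctlz_s16_s16 tests below show. A sketch of that composition, reusing the hypothetical ffbh32 model (-1 on zero input; not LLVM code):

#include <algorithm>
#include <cstdint>

uint32_t ffbh32(uint32_t x) { return x ? __builtin_clz(x) : UINT32_MAX; }

// 16-bit ctlz via the widened value: ctlz16(0) == 32 - 16 == 16.
uint32_t ctlz16(uint16_t x) { return std::min(ffbh32(x), 32u) - 16; }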

// Check that this is a G_XOR x, -1
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI) {
if (MI.getOpcode() != TargetOpcode::G_XOR)
2 changes: 2 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -89,6 +89,8 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {

bool legalizeBuildVector(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
bool legalizeCTLZ_CTTZ(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;

bool loadInputValue(Register DstReg, MachineIRBuilder &B,
const ArgDescriptor *Arg,
24 changes: 17 additions & 7 deletions llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2513,6 +2513,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
llvm_unreachable("narrowScalar should have succeeded");
return;
}
case AMDGPU::G_AMDGPU_FFBH_U32:
case AMDGPU::G_AMDGPU_FFBL_B32:
case AMDGPU::G_CTLZ_ZERO_UNDEF:
case AMDGPU::G_CTTZ_ZERO_UNDEF: {
const RegisterBank *DstBank =
@@ -2528,18 +2530,26 @@

// We can narrow this more efficiently than Helper can by using ffbh/ffbl
// which return -1 when the input is zero:
// (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), 32 + (ffbh lo))
// (cttz_zero_undef hi:lo) -> (umin 32 + (ffbl hi), (ffbl lo))
// (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
// (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
// (ffbh hi:lo) -> (umin (ffbh hi), (uaddsat (ffbh lo), 32))
// (ffbl hi:lo) -> (umin (uaddsat (ffbl hi), 32), (ffbl lo))
ApplyRegBankMapping ApplyVALU(*this, MRI, &AMDGPU::VGPRRegBank);
MachineIRBuilder B(MI, ApplyVALU);
SmallVector<Register, 2> SrcRegs(OpdMapper.getVRegs(1));
unsigned NewOpc = Opc == AMDGPU::G_CTLZ_ZERO_UNDEF
? AMDGPU::G_AMDGPU_FFBH_U32
: AMDGPU::G_AMDGPU_FFBL_B32;
unsigned Idx = Opc == AMDGPU::G_CTLZ_ZERO_UNDEF;
: Opc == AMDGPU::G_CTTZ_ZERO_UNDEF
? AMDGPU::G_AMDGPU_FFBL_B32
: Opc;
unsigned Idx = NewOpc == AMDGPU::G_AMDGPU_FFBH_U32;
auto X = B.buildInstr(NewOpc, {S32}, {SrcRegs[Idx]});
auto Y = B.buildInstr(NewOpc, {S32}, {SrcRegs[Idx ^ 1]});
Y = B.buildAdd(S32, Y, B.buildConstant(S32, 32));
unsigned AddOpc =
Opc == AMDGPU::G_CTLZ_ZERO_UNDEF || Opc == AMDGPU::G_CTTZ_ZERO_UNDEF
? AMDGPU::G_ADD
: AMDGPU::G_UADDSAT;
Y = B.buildInstr(AddOpc, {S32}, {Y, B.buildConstant(S32, 32)});
Register DstReg = MI.getOperand(0).getReg();
B.buildUMin(DstReg, X, Y);
MI.eraseFromParent();
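The saturating add is what the new ffbh/ffbl cases need beyond the zero-undef ones: when both halves are zero, a plain add would wrap -1 + 32 around to 31, while uaddsat keeps the low-half term at -1 so the result stays -1; for the zero-undef opcodes that input is undefined, so G_ADD suffices. A reference model of the ffbh row of the comment above, using the same hypothetical ffbh32 semantics (not LLVM code):

#include <algorithm>
#include <cstdint>

uint32_t ffbh32(uint32_t x) { return x ? __builtin_clz(x) : UINT32_MAX; }

uint32_t uaddsat32(uint32_t a, uint32_t b) {
  uint64_t s = (uint64_t)a + b;
  return s > UINT32_MAX ? UINT32_MAX : (uint32_t)s;
}

// (ffbh hi:lo) -> (umin (ffbh hi), (uaddsat (ffbh lo), 32))
// hi != 0:           ffbh32(hi) <= 31 wins the min.
// hi == 0, lo != 0:  result is 32 + ffbh32(lo).
// hi == 0, lo == 0:  both operands saturate to -1, so the result is -1.
uint32_t ffbh64(uint32_t hi, uint32_t lo) {
  return std::min(ffbh32(hi), uaddsat32(ffbh32(lo), 32));
}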
@@ -3651,8 +3661,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_INTRINSIC_TRUNC:
case AMDGPU::G_BSWAP: // TODO: Somehow expand for scalar?
case AMDGPU::G_FSHR: // TODO: Expand for scalar
case AMDGPU::G_AMDGPU_FFBH_U32:
case AMDGPU::G_AMDGPU_FFBL_B32:
case AMDGPU::G_AMDGPU_FMIN_LEGACY:
case AMDGPU::G_AMDGPU_FMAX_LEGACY:
case AMDGPU::G_AMDGPU_RCP_IFLAG:
@@ -3758,6 +3766,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[0] = OpdsMapping[1] = AMDGPU::getValueMapping(BankID, Size);
break;
}
case AMDGPU::G_AMDGPU_FFBH_U32:
case AMDGPU::G_AMDGPU_FFBL_B32:
case AMDGPU::G_CTLZ_ZERO_UNDEF:
case AMDGPU::G_CTTZ_ZERO_UNDEF: {
unsigned Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
25 changes: 21 additions & 4 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
@@ -1123,16 +1123,33 @@ define float @v_test_uitofp_i64_byte_to_f32(i64 %arg0) {
; SI-LABEL: v_test_uitofp_i64_byte_to_f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v2, 0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
; SI-NEXT: v_ldexp_f32_e64 v0, v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_min_u32_e32 v2, 32, v2
; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], v2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_cvt_f32_u32_e32 v0, v0
; SI-NEXT: v_sub_i32_e32 v1, vcc, 32, v2
; SI-NEXT: v_ldexp_f32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_test_uitofp_i64_byte_to_f32:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
; VI-NEXT: v_ldexp_f32 v0, v0, 0
; VI-NEXT: v_ffbh_u32_e32 v2, 0
; VI-NEXT: v_and_b32_e32 v0, 0xff, v0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: v_min_u32_e32 v2, 32, v2
; VI-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: v_cvt_f32_u32_e32 v0, v0
; VI-NEXT: v_sub_u32_e32 v1, vcc, 32, v2
; VI-NEXT: v_ldexp_f32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
%masked = and i64 %arg0, 255
%itofp = uitofp i64 %masked to float
129 changes: 53 additions & 76 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz.mir
@@ -9,12 +9,10 @@ body: |
liveins: $vgpr0
; CHECK-LABEL: name: ctlz_s32_s32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
; CHECK: $vgpr0 = COPY [[SELECT]](s32)
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[COPY]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C]]
; CHECK: $vgpr0 = COPY [[UMIN]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = G_CTLZ %0
$vgpr0 = COPY %1
@@ -28,12 +26,10 @@ body: |
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: ctlz_s32_s64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
; CHECK: $vgpr0 = COPY [[SELECT]](s32)
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[COPY]](s64)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C]]
; CHECK: $vgpr0 = COPY [[UMIN]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = G_CTLZ %0
$vgpr0 = COPY %1
@@ -47,12 +43,10 @@ body: |
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: ctlz_s64_s64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SELECT]](s32)
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[COPY]](s64)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[UMIN]](s32)
; CHECK: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = G_CTLZ %0
@@ -67,14 +61,12 @@ body: |
liveins: $vgpr0
; CHECK-LABEL: name: ctlz_s16_s32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[COPY]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UMIN]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
; CHECK: $vgpr0 = COPY [[AND]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s16) = G_CTLZ %0
@@ -93,13 +85,11 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT]], [[C3]]
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[AND]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UMIN]], [[C2]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
@@ -120,15 +110,12 @@ body: |
; CHECK-LABEL: name: ctlz_v2s32_v2s32
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C1]], [[CTLZ_ZERO_UNDEF1]]
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32)
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[UV]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C]]
; CHECK: [[AMDGPU_FFBH_U32_1:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[UV1]](s32)
; CHECK: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_1]], [[C]]
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UMIN]](s32), [[UMIN1]](s32)
; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = G_CTLZ %0
@@ -144,15 +131,12 @@ body: |
; CHECK-LABEL: name: ctlz_v2s32_v2s64
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV]](s64)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s64), [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s64)
; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s64), [[C]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C1]], [[CTLZ_ZERO_UNDEF1]]
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32)
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[UV]](s64)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C]]
; CHECK: [[AMDGPU_FFBH_U32_1:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[UV1]](s64)
; CHECK: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_1]], [[C]]
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UMIN]](s32), [[UMIN1]](s32)
; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s32>) = G_CTLZ %0
@@ -173,19 +157,16 @@ body: |
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C3]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT]], [[C]]
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[AND]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C2]]
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UMIN]], [[C]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
; CHECK: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND1]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s32), [[C2]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C3]], [[CTLZ_ZERO_UNDEF1]]
; CHECK: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C]]
; CHECK: [[AMDGPU_FFBH_U32_1:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[AND1]](s32)
; CHECK: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_1]], [[C2]]
; CHECK: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UMIN1]], [[C]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C1]]
@@ -212,13 +193,11 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT]], [[C3]]
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[AND]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UMIN]], [[C2]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
@@ -242,15 +221,13 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934591
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
; CHECK: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s64)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s64), [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[CTLZ_ZERO_UNDEF]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SELECT]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
; CHECK: [[AMDGPU_FFBH_U32_:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[AND]](s64)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[AMDGPU_FFBH_U32_]], [[C1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[UMIN]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C3]](s64)
; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; CHECK: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
; CHECK: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
; CHECK: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[USUBO]](s32)
