[Local] invertCondition: try modifying an existing ICmpInst
This avoids various cases where StructurizeCFG would otherwise insert an
xor i1 instruction, and since StructurizeCFG generally runs late in the
pipeline, instcombine does not clean up the xor-of-cmp pattern.

Differential Revision: https://reviews.llvm.org/D118478
jayfoad committed Jan 31, 2022
1 parent 00bf475 commit a6b54dd
Showing 23 changed files with 158 additions and 164 deletions.
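For illustration, a minimal IR sketch of the pattern this change targets (hypothetical values and labels, not taken from the tests below). Previously, inverting a single-use compare materialized an extra instruction; now the predicate is flipped in place:

    ; before: invertCondition materializes the inversion
    %cmp = icmp slt i32 %a, %b
    %cmp.inv = xor i1 %cmp, true
    br i1 %cmp.inv, label %flow, label %then

    ; after: the compare's predicate is inverted in place (slt -> sge)
    %cmp = icmp sge i32 %a, %b
    br i1 %cmp, label %flow, label %then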
8 changes: 8 additions & 0 deletions llvm/lib/Transforms/Utils/Local.cpp
@@ -3343,6 +3343,14 @@ Value *llvm::invertCondition(Value *Condition) {
      if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
        return I;

  // Fourth: Modify an existing instruction
  if (Condition->hasOneUse()) {
    if (auto *CI = dyn_cast<CmpInst>(Condition)) {
      CI->setPredicate(CI->getInversePredicate());
      return Condition;
    }
  }

  // Last option: Create a new instruction
  auto *Inverted =
      BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
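The hasOneUse() guard in the new fourth step is what makes the in-place mutation safe: flipping the predicate changes the compare for every user, so it is only done when the condition being inverted is the compare's sole user. A sketch of the case that must be left alone (hypothetical values):

    %cmp = icmp eq i32 %x, 0
    %sel = select i1 %cmp, i32 1, i32 2    ; a second user of %cmp
    br i1 %cmp, label %then, label %else
    ; flipping %cmp to icmp ne would silently change %sel as well,
    ; so invertCondition falls back to creating a not instead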
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
@@ -139,8 +139,10 @@ define void @constrained_if_register_class() {
; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_cmp_lg_u32 s4, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB4_4
; CHECK-NEXT: ; %bb.1: ; %bb2
; CHECK-NEXT: s_cbranch_scc0 .LBB4_2
; CHECK-NEXT: .LBB4_1: ; %bb12
; CHECK-NEXT: s_setpc_b64 s[30:31]
; CHECK-NEXT: .LBB4_2: ; %bb2
; CHECK-NEXT: s_getpc_b64 s[4:5]
; CHECK-NEXT: s_add_u32 s4, s4, const.ptr@gotpcrel32@lo+4
; CHECK-NEXT: s_addc_u32 s5, s5, const.ptr@gotpcrel32@hi+12
@@ -153,15 +155,13 @@ define void @constrained_if_register_class() {
; CHECK-NEXT: s_mov_b32 s4, -1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
; CHECK-NEXT: s_cbranch_vccnz .LBB4_3
; CHECK-NEXT: ; %bb.2: ; %bb7
; CHECK-NEXT: s_cbranch_vccnz .LBB4_4
; CHECK-NEXT: ; %bb.3: ; %bb7
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: .LBB4_3: ; %bb8
; CHECK-NEXT: .LBB4_4: ; %bb8
; CHECK-NEXT: s_cmp_lg_u32 s4, 0
; CHECK-NEXT: s_cbranch_scc0 .LBB4_5
; CHECK-NEXT: .LBB4_4: ; %bb12
; CHECK-NEXT: s_setpc_b64 s[30:31]
; CHECK-NEXT: .LBB4_5: ; %bb11
; CHECK-NEXT: s_cbranch_scc1 .LBB4_1
; CHECK-NEXT: ; %bb.5: ; %bb11
; CHECK-NEXT: v_mov_b32_e32 v0, 4.0
; CHECK-NEXT: buffer_store_dword v0, v0, s[0:3], 0 offen
; CHECK-NEXT: s_waitcnt vmcnt(0)
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -838,7 +838,7 @@ define <2 x i64> @v_sdiv_v2i64(<2 x i64> %num, <2 x i64> %den) {
; CGP-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; CGP-NEXT: ; implicit-def: $vgpr4
; CGP-NEXT: ; implicit-def: $vgpr10
; CGP-NEXT: .LBB2_2: ; %Flow2
; CGP-NEXT: .LBB2_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
; CGP-NEXT: s_xor_b64 exec, exec, s[6:7]
; CGP-NEXT: s_cbranch_execz .LBB2_4
@@ -3118,7 +3118,7 @@ define <2 x i64> @v_sdiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) {
; CGP-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; CGP-NEXT: ; implicit-def: $vgpr2_vgpr3
; CGP-NEXT: ; implicit-def: $vgpr8
; CGP-NEXT: .LBB8_2: ; %Flow2
; CGP-NEXT: .LBB8_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[8:9], s[8:9]
; CGP-NEXT: v_lshl_b64 v[9:10], s[6:7], v6
; CGP-NEXT: s_xor_b64 exec, exec, s[8:9]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -824,7 +824,7 @@ define <2 x i64> @v_srem_v2i64(<2 x i64> %num, <2 x i64> %den) {
; CGP-NEXT: v_subb_u32_e32 v1, vcc, v2, v4, vcc
; CGP-NEXT: ; implicit-def: $vgpr4
; CGP-NEXT: ; implicit-def: $vgpr10
; CGP-NEXT: .LBB2_2: ; %Flow2
; CGP-NEXT: .LBB2_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[4:5], s[6:7]
; CGP-NEXT: s_xor_b64 exec, exec, s[4:5]
; CGP-NEXT: s_cbranch_execz .LBB2_4
@@ -3072,7 +3072,7 @@ define <2 x i64> @v_srem_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) {
; CGP-NEXT: v_subb_u32_e32 v1, vcc, v2, v4, vcc
; CGP-NEXT: ; implicit-def: $vgpr2_vgpr3
; CGP-NEXT: ; implicit-def: $vgpr8
; CGP-NEXT: .LBB8_2: ; %Flow2
; CGP-NEXT: .LBB8_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[4:5], s[8:9]
; CGP-NEXT: v_lshl_b64 v[9:10], s[6:7], v6
; CGP-NEXT: s_xor_b64 exec, exec, s[4:5]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -759,7 +759,7 @@ define <2 x i64> @v_udiv_v2i64(<2 x i64> %num, <2 x i64> %den) {
; CGP-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; CGP-NEXT: ; implicit-def: $vgpr4
; CGP-NEXT: ; implicit-def: $vgpr10
; CGP-NEXT: .LBB2_2: ; %Flow2
; CGP-NEXT: .LBB2_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[6:7], s[6:7]
; CGP-NEXT: s_xor_b64 exec, exec, s[6:7]
; CGP-NEXT: s_cbranch_execz .LBB2_4
@@ -1641,7 +1641,7 @@ define <2 x i64> @v_udiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) {
; CGP-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; CGP-NEXT: ; implicit-def: $vgpr2_vgpr3
; CGP-NEXT: ; implicit-def: $vgpr8
; CGP-NEXT: .LBB8_2: ; %Flow2
; CGP-NEXT: .LBB8_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[8:9], s[8:9]
; CGP-NEXT: v_lshl_b64 v[9:10], s[6:7], v6
; CGP-NEXT: s_xor_b64 exec, exec, s[8:9]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -750,7 +750,7 @@ define <2 x i64> @v_urem_v2i64(<2 x i64> %num, <2 x i64> %den) {
; CGP-NEXT: v_cndmask_b32_e32 v1, v2, v5, vcc
; CGP-NEXT: ; implicit-def: $vgpr4
; CGP-NEXT: ; implicit-def: $vgpr10
; CGP-NEXT: .LBB2_2: ; %Flow2
; CGP-NEXT: .LBB2_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[4:5], s[6:7]
; CGP-NEXT: s_xor_b64 exec, exec, s[4:5]
; CGP-NEXT: s_cbranch_execz .LBB2_4
@@ -2181,7 +2181,7 @@ define <2 x i64> @v_urem_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) {
; CGP-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; CGP-NEXT: ; implicit-def: $vgpr2_vgpr3
; CGP-NEXT: ; implicit-def: $vgpr8
; CGP-NEXT: .LBB8_2: ; %Flow2
; CGP-NEXT: .LBB8_2: ; %Flow1
; CGP-NEXT: s_or_saveexec_b64 s[4:5], s[8:9]
; CGP-NEXT: v_lshl_b64 v[9:10], s[6:7], v6
; CGP-NEXT: s_xor_b64 exec, exec, s[4:5]
25 changes: 13 additions & 12 deletions llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -227,30 +227,31 @@ bb3:

; GCN-LABEL: {{^}}uniform_unconditional_min_long_forward_branch:
; GCN: s_cmp_eq_u32
; GCN: s_cbranch_scc{{[0-1]}} [[BB2:.LBB[0-9]+_[0-9]+]]
; GCN: s_cbranch_scc{{[0-1]}} [[BB1:.LBB[0-9]+_[0-9]+]]

; GCN-NEXT: {{.LBB[0-9]+_[0-9]+}}: ; %bb0
; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
; GCN-NEXT: [[POST_GETPC:.Lpost_getpc[0-9]+]]:{{$}}
; GCN-NEXT: s_add_u32 s[[PC0_LO]], s[[PC0_LO]], ([[BB3:.LBB[0-9]_[0-9]+]]-[[POST_GETPC]])&4294967295
; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], ([[BB3:.LBB[0-9]_[0-9]+]]-[[POST_GETPC]])>>32
; GCN-NEXT: s_add_u32 s[[PC0_LO]], s[[PC0_LO]], ([[BB4:.LBB[0-9]_[0-9]+]]-[[POST_GETPC]])&4294967295
; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], ([[BB4]]-[[POST_GETPC]])>>32
; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}

; GCN: [[BB2]]: ; %bb3
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND

; GCN: [[BB3]]:
; GCN: [[BB1]]:
; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
; GCN: buffer_store_dword [[BB2_K]]

; GCN: v_mov_b32_e32 [[BB4_K:v[0-9]+]], 63
; GCN: buffer_store_dword [[BB4_K]]
; GCN: s_endpgm
; GCN-NEXT: .Lfunc_end{{[0-9]+}}:

; GCN: [[BB4]]: ; %bb3
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: v_nop_e64
; GCN: ;;#ASMEND

; GCN: .Lfunc_end{{[0-9]+}}:
define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
bb0:
%tmp = icmp ne i32 %arg1, 0
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/AMDGPU/ctpop16.ll
@@ -1502,30 +1502,30 @@ define amdgpu_kernel void @ctpop_i16_in_br(i16 addrspace(1)* %out, i16 addrspace
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s5, s4, 16
; SI-NEXT: s_cmp_lg_u32 s5, 0
; SI-NEXT: s_cbranch_scc0 .LBB14_2
; SI-NEXT: s_cbranch_scc0 .LBB14_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_mov_b32 s8, s2
; SI-NEXT: s_mov_b32 s9, s3
; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
; SI-NEXT: s_mov_b64 s[2:3], 0
; SI-NEXT: s_cbranch_execz .LBB14_3
; SI-NEXT: s_branch .LBB14_4
; SI-NEXT: .LBB14_2:
; SI-NEXT: s_mov_b64 s[2:3], -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: .LBB14_3: ; %if
; SI-NEXT: s_cbranch_execnz .LBB14_3
; SI-NEXT: .LBB14_2: ; %if
; SI-NEXT: s_and_b32 s2, s4, 0xffff
; SI-NEXT: s_bcnt1_i32_b32 s2, s2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: .LBB14_4: ; %endif
; SI-NEXT: .LBB14_3: ; %endif
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB14_4:
; SI-NEXT: s_mov_b64 s[2:3], -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_branch .LBB14_2
;
; VI-LABEL: ctpop_i16_in_br:
; VI: ; %bb.0: ; %entry
@@ -1535,30 +1535,30 @@ define amdgpu_kernel void @ctpop_i16_in_br(i16 addrspace(1)* %out, i16 addrspace
; VI-NEXT: s_lshr_b32 s5, s4, 16
; VI-NEXT: v_cmp_ne_u16_e64 s[6:7], s5, 0
; VI-NEXT: s_and_b64 vcc, exec, s[6:7]
; VI-NEXT: s_cbranch_vccz .LBB14_2
; VI-NEXT: s_cbranch_vccz .LBB14_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_mov_b32 s8, s2
; VI-NEXT: s_mov_b32 s9, s3
; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
; VI-NEXT: s_mov_b64 s[2:3], 0
; VI-NEXT: s_cbranch_execz .LBB14_3
; VI-NEXT: s_branch .LBB14_4
; VI-NEXT: .LBB14_2:
; VI-NEXT: s_mov_b64 s[2:3], -1
; VI-NEXT: ; implicit-def: $vgpr0
; VI-NEXT: .LBB14_3: ; %if
; VI-NEXT: s_cbranch_execnz .LBB14_3
; VI-NEXT: .LBB14_2: ; %if
; VI-NEXT: s_and_b32 s2, s4, 0xffff
; VI-NEXT: s_bcnt1_i32_b32 s2, s2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: .LBB14_4: ; %endif
; VI-NEXT: .LBB14_3: ; %endif
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB14_4:
; VI-NEXT: s_mov_b64 s[2:3], -1
; VI-NEXT: ; implicit-def: $vgpr0
; VI-NEXT: s_branch .LBB14_2
;
; EG-LABEL: ctpop_i16_in_br:
; EG: ; %bb.0: ; %entry
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1534,50 +1534,50 @@ define amdgpu_kernel void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 add
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: s_cbranch_scc0 .LBB30_2
; SI-NEXT: s_cbranch_scc0 .LBB30_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s7, s[2:3], 0x1
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 vcc, vcc
; SI-NEXT: s_cbranch_vccz .LBB30_3
; SI-NEXT: s_branch .LBB30_4
; SI-NEXT: .LBB30_2:
; SI-NEXT: .LBB30_3: ; %if
; SI-NEXT: s_cbranch_vccnz .LBB30_3
; SI-NEXT: .LBB30_2: ; %if
; SI-NEXT: s_load_dword s7, s[2:3], 0x0
; SI-NEXT: .LBB30_4: ; %endif
; SI-NEXT: .LBB30_3: ; %endif
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: s_mov_b32 s3, 0x100f000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB30_4:
; SI-NEXT: s_branch .LBB30_2
;
; VI-LABEL: insert_split_bb:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s6, s[4:5], 0x10
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s6, 0
; VI-NEXT: s_cbranch_scc0 .LBB30_2
; VI-NEXT: s_cbranch_scc0 .LBB30_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_load_dword s7, s[2:3], 0x4
; VI-NEXT: s_cbranch_execz .LBB30_3
; VI-NEXT: s_branch .LBB30_4
; VI-NEXT: .LBB30_2:
; VI-NEXT: .LBB30_3: ; %if
; VI-NEXT: s_cbranch_execnz .LBB30_3
; VI-NEXT: .LBB30_2: ; %if
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s7, s[2:3], 0x0
; VI-NEXT: .LBB30_4: ; %endif
; VI-NEXT: .LBB30_3: ; %endif
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_mov_b32 s3, 0x1100f000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB30_4:
; VI-NEXT: s_branch .LBB30_2
entry:
%0 = insertelement <2 x i32> undef, i32 %a, i32 0
%1 = icmp eq i32 %a, 0
5 changes: 2 additions & 3 deletions llvm/test/CodeGen/AMDGPU/loop_break.ll
@@ -17,11 +17,10 @@ define amdgpu_kernel void @break_loop(i32 %arg) #0 {
; OPT-NEXT: br i1 [[CMP0]], label [[BB4:%.*]], label [[FLOW]]
; OPT: bb4:
; OPT-NEXT: [[LOAD:%.*]] = load volatile i32, i32 addrspace(1)* undef, align 4
; OPT-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: [[TMP0:%.*]] = xor i1 [[CMP1]], true
; OPT-NEXT: [[CMP1:%.*]] = icmp sge i32 [[MY_TMP]], [[LOAD]]
; OPT-NEXT: br label [[FLOW]]
; OPT: Flow:
; OPT-NEXT: [[TMP1:%.*]] = phi i1 [ [[TMP0]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP1:%.*]] = phi i1 [ [[CMP1]], [[BB4]] ], [ true, [[BB1]] ]
; OPT-NEXT: [[TMP2]] = call i64 @llvm.amdgcn.if.break.i64(i1 [[TMP1]], i64 [[PHI_BROKEN]])
; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP2]])
; OPT-NEXT: br i1 [[TMP3]], label [[BB9:%.*]], label [[BB1]]
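The loop_break.ll diff above shows the IR-level effect directly: an icmp slt followed by xor-with-true collapses into a single icmp sge, and the Flow phi consumes the inverted compare. For reference, a few inversions as computed by CmpInst::getInversePredicate (hypothetical values; for fcmp, ordered and unordered are swapped):

    %c0 = icmp slt i32 %a, %b      ; inverse: icmp sge i32 %a, %b
    %c1 = icmp ule i32 %a, %b      ; inverse: icmp ugt i32 %a, %b
    %c2 = fcmp olt float %u, %v    ; inverse: fcmp uge float %u, %v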
