diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
index d641063984eb8..34ff84d5aac36 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
@@ -3,7 +3,6 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize32,-wavefrontsize64 -global-isel -verify-machineinstrs < %s | FileCheck %s
 
 declare i32 @llvm.amdgcn.ballot.i32(i1)
-declare i64 @llvm.amdgcn.ballot.i64(i1)
 declare i32 @llvm.ctpop.i32(i32)
 
 ; Test ballot(0)
@@ -204,30 +203,6 @@ false:
   ret i32 33
 }
 
-define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_compare(i32 %v) {
-; CHECK-LABEL: branch_divergent_ballot64_ne_zero_compare:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_gt_u32_e64 s0, 12, v0
-; CHECK-NEXT: s_mov_b32 s1, 0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
-; CHECK-NEXT: ; %bb.1: ; %true
-; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB12_3
-; CHECK-NEXT: .LBB12_2: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB12_3
-; CHECK-NEXT: .LBB12_3:
-  %c = icmp ult i32 %v, 12
-  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
-  %ballot_ne_zero = icmp ne i64 %ballot, 0
-  br i1 %ballot_ne_zero, label %true, label %false
-true:
-  ret i32 42
-false:
-  ret i32 33
-}
-
 define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
 ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
 ; CHECK: ; %bb.0:
@@ -236,14 +211,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
 ; CHECK-NEXT: s_and_b32 s0, 1, s0
 ; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
 ; CHECK-NEXT: s_cmp_eq_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB13_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB13_3
-; CHECK-NEXT: .LBB13_2: ; %false
+; CHECK-NEXT: s_branch .LBB12_3
+; CHECK-NEXT: .LBB12_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB13_3
-; CHECK-NEXT: .LBB13_3:
+; CHECK-NEXT: s_branch .LBB12_3
+; CHECK-NEXT: .LBB12_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %ballot_ne_zero = icmp ne i32 %ballot, 0
@@ -259,14 +234,14 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_compare(i32 %v) {
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
 ; CHECK-NEXT: s_cmp_lg_u32 vcc_lo, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB14_2
+; CHECK-NEXT: s_cbranch_scc0 .LBB13_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_branch .LBB13_3
+; CHECK-NEXT: .LBB13_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_3:
+; CHECK-NEXT: s_branch .LBB13_3
+; CHECK-NEXT: .LBB13_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %ballot_eq_zero = icmp eq i32 %ballot, 0
@@ -285,14 +260,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
 ; CHECK-NEXT: s_and_b32 s0, 1, s0
 ; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB15_2
+; CHECK-NEXT: s_cbranch_scc0 .LBB14_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB15_3
-; CHECK-NEXT: .LBB15_2: ; %true
+; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB15_3
-; CHECK-NEXT: .LBB15_3:
+; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %ballot_eq_zero = icmp eq i32 %ballot, 0
@@ -310,14 +285,14 @@ define amdgpu_cs i32 @branch_divergent_ballot_ne_zero_and(i32 %v1, i32 %v2) {
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0
 ; CHECK-NEXT: s_cmp_eq_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB16_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB15_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB16_3
-; CHECK-NEXT: .LBB16_2: ; %false
+; CHECK-NEXT: s_branch .LBB15_3
+; CHECK-NEXT: .LBB15_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB16_3
-; CHECK-NEXT: .LBB16_3:
+; CHECK-NEXT: s_branch .LBB15_3
+; CHECK-NEXT: .LBB15_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -330,34 +305,6 @@ false:
   ret i32 33
 }
 
-define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_and(i32 %v1, i32 %v2) {
-; CHECK-LABEL: branch_divergent_ballot64_ne_zero_and:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
-; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
-; CHECK-NEXT: s_mov_b32 s1, 0
-; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB17_2
-; CHECK-NEXT: ; %bb.1: ; %true
-; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB17_3
-; CHECK-NEXT: .LBB17_2: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB17_3
-; CHECK-NEXT: .LBB17_3:
-  %v1c = icmp ult i32 %v1, 12
-  %v2c = icmp ugt i32 %v2, 34
-  %c = and i1 %v1c, %v2c
-  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
-  %ballot_ne_zero = icmp ne i64 %ballot, 0
-  br i1 %ballot_ne_zero, label %true, label %false
-true:
-  ret i32 42
-false:
-  ret i32 33
-}
-
 define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
 ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
 ; CHECK: ; %bb.0:
@@ -369,14 +316,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg
 ; CHECK-NEXT: s_and_b32 s0, 1, s0
 ; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
 ; CHECK-NEXT: s_cmp_eq_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB18_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB16_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_branch .LBB16_3
+; CHECK-NEXT: .LBB16_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_3:
+; CHECK-NEXT: s_branch .LBB16_3
+; CHECK-NEXT: .LBB16_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -396,14 +343,14 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0
 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB19_2
+; CHECK-NEXT: s_cbranch_scc0 .LBB17_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB19_3
-; CHECK-NEXT: .LBB19_2: ; %true
+; CHECK-NEXT: s_branch .LBB17_3
+; CHECK-NEXT: .LBB17_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB19_3
-; CHECK-NEXT: .LBB19_3:
+; CHECK-NEXT: s_branch .LBB17_3
+; CHECK-NEXT: .LBB17_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -427,14 +374,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
 ; CHECK-NEXT: s_and_b32 s0, 1, s0
 ; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB20_2
+; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB20_3
-; CHECK-NEXT: .LBB20_2: ; %true
+; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB20_3
-; CHECK-NEXT: .LBB20_3:
+; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -455,14 +402,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_sgt_N_compare(i32 inreg %v) {
 ; CHECK-NEXT: s_and_b32 s0, 1, s0
 ; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
 ; CHECK-NEXT: s_cmp_le_i32 s0, 22
-; CHECK-NEXT: s_cbranch_scc1 .LBB21_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB19_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB21_3
-; CHECK-NEXT: .LBB21_2: ; %false
+; CHECK-NEXT: s_branch .LBB19_3
+; CHECK-NEXT: .LBB19_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB21_3
-; CHECK-NEXT: .LBB21_3:
+; CHECK-NEXT: s_branch .LBB19_3
+; CHECK-NEXT: .LBB19_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %bc = icmp sgt i32 %ballot, 22
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
index 3337d053eb930..857ef99e32618 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
@@ -3,7 +3,6 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck %s
 
 declare i32 @llvm.amdgcn.ballot.i32(i1)
-declare i64 @llvm.amdgcn.ballot.i64(i1)
 declare i32 @llvm.ctpop.i32(i32)
 
 ; Test ballot(0)
@@ -199,13 +198,11 @@ false:
   ret i32 33
 }
 
-define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_compare(i32 %v) {
-; CHECK-LABEL: branch_divergent_ballot64_ne_zero_compare:
+define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
+; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_gt_u32_e64 s0, 12, v0
-; CHECK-NEXT: s_mov_b32 s1, 0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
+; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12
+; CHECK-NEXT: s_cbranch_vccz .LBB12_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
 ; CHECK-NEXT: s_branch .LBB12_3
@@ -213,28 +210,6 @@ define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_compare(i32 %v) {
 ; CHECK-NEXT: s_mov_b32 s0, 33
 ; CHECK-NEXT: s_branch .LBB12_3
 ; CHECK-NEXT: .LBB12_3:
-  %c = icmp ult i32 %v, 12
-  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
-  %ballot_ne_zero = icmp ne i64 %ballot, 0
-  br i1 %ballot_ne_zero, label %true, label %false
-true:
-  ret i32 42
-false:
-  ret i32 33
-}
-
-define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
-; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB13_2
-; CHECK-NEXT: ; %bb.1: ; %true
-; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB13_3
-; CHECK-NEXT: .LBB13_2: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB13_3
-; CHECK-NEXT: .LBB13_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %ballot_ne_zero = icmp ne i32 %ballot, 0
@@ -249,14 +224,14 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_compare(i32 %v) {
 ; CHECK-LABEL: branch_divergent_ballot_eq_zero_compare:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
-; CHECK-NEXT: s_cbranch_vccz .LBB14_2
+; CHECK-NEXT: s_cbranch_vccz .LBB13_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_branch .LBB13_3
+; CHECK-NEXT: .LBB13_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_3:
+; CHECK-NEXT: s_branch .LBB13_3
+; CHECK-NEXT: .LBB13_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %ballot_eq_zero = icmp eq i32 %ballot, 0
@@ -271,14 +246,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
 ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB15_2
+; CHECK-NEXT: s_cbranch_vccz .LBB14_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB15_3
-; CHECK-NEXT: .LBB15_2: ; %true
+; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB15_3
-; CHECK-NEXT: .LBB15_3:
+; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %ballot_eq_zero = icmp eq i32 %ballot, 0
@@ -295,14 +270,14 @@ define amdgpu_cs i32 @branch_divergent_ballot_ne_zero_and(i32 %v1, i32 %v2) {
 ; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT: s_and_b32 vcc_lo, vcc_lo, s0
-; CHECK-NEXT: s_cbranch_vccz .LBB16_2
+; CHECK-NEXT: s_cbranch_vccz .LBB15_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB16_3
-; CHECK-NEXT: .LBB16_2: ; %false
+; CHECK-NEXT: s_branch .LBB15_3
+; CHECK-NEXT: .LBB15_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB16_3
-; CHECK-NEXT: .LBB16_3:
+; CHECK-NEXT: s_branch .LBB15_3
+; CHECK-NEXT: .LBB15_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -315,36 +290,6 @@ false:
   ret i32 33
 }
 
-define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_and(i32 %v1, i32 %v2) {
-; CHECK-LABEL: branch_divergent_ballot64_ne_zero_and:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
-; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
-; CHECK-NEXT: s_mov_b32 s1, 0
-; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, v0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB17_2
-; CHECK-NEXT: ; %bb.1: ; %true
-; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB17_3
-; CHECK-NEXT: .LBB17_2: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB17_3
-; CHECK-NEXT: .LBB17_3:
-  %v1c = icmp ult i32 %v1, 12
-  %v2c = icmp ugt i32 %v2, 34
-  %c = and i1 %v1c, %v2c
-  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
-  %ballot_ne_zero = icmp ne i64 %ballot, 0
-  br i1 %ballot_ne_zero, label %true, label %false
-true:
-  ret i32 42
-false:
-  ret i32 33
-}
-
 define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
 ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
 ; CHECK: ; %bb.0:
@@ -354,14 +299,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg
 ; CHECK-NEXT: s_cselect_b32 s1, -1, 0
 ; CHECK-NEXT: s_and_b32 s0, s0, s1
 ; CHECK-NEXT: s_and_b32 s0, s0, exec_lo
-; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
+; CHECK-NEXT: s_cbranch_scc0 .LBB16_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_branch .LBB16_3
+; CHECK-NEXT: .LBB16_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_3:
+; CHECK-NEXT: s_branch .LBB16_3
+; CHECK-NEXT: .LBB16_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -380,14 +325,14 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
 ; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT: s_and_b32 vcc_lo, vcc_lo, s0
-; CHECK-NEXT: s_cbranch_vccz .LBB19_2
+; CHECK-NEXT: s_cbranch_vccz .LBB17_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB19_3
-; CHECK-NEXT: .LBB19_2: ; %true
+; CHECK-NEXT: s_branch .LBB17_3
+; CHECK-NEXT: .LBB17_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB19_3
-; CHECK-NEXT: .LBB19_3:
+; CHECK-NEXT: s_branch .LBB17_3
+; CHECK-NEXT: .LBB17_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -409,14 +354,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
 ; CHECK-NEXT: s_cselect_b32 s1, -1, 0
 ; CHECK-NEXT: s_and_b32 s0, s0, s1
 ; CHECK-NEXT: s_and_b32 s0, s0, exec_lo
-; CHECK-NEXT: s_cbranch_scc0 .LBB20_2
+; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB20_3
-; CHECK-NEXT: .LBB20_2: ; %true
+; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB20_3
-; CHECK-NEXT: .LBB20_3:
+; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -434,14 +379,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_sgt_N_compare(i32 inreg %v) {
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, s0, 12
 ; CHECK-NEXT: s_cmp_lt_i32 s0, 23
-; CHECK-NEXT: s_cbranch_scc1 .LBB21_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB19_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB21_3
-; CHECK-NEXT: .LBB21_2: ; %false
+; CHECK-NEXT: s_branch .LBB19_3
+; CHECK-NEXT: .LBB19_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB21_3
-; CHECK-NEXT: .LBB21_3:
+; CHECK-NEXT: s_branch .LBB19_3
+; CHECK-NEXT: .LBB19_3:
   %c = icmp ult i32 %v, 12
   %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
   %bc = icmp sgt i32 %ballot, 22
@@ -460,14 +405,14 @@ define amdgpu_cs i32 @branch_divergent_simulated_negated_ballot_ne_zero_and(i32
 ; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT: s_and_b32 vcc_lo, vcc_lo, s0
-; CHECK-NEXT: s_cbranch_vccnz .LBB22_2
+; CHECK-NEXT: s_cbranch_vccnz .LBB20_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB22_3
-; CHECK-NEXT: .LBB22_2: ; %false
+; CHECK-NEXT: s_branch .LBB20_3
+; CHECK-NEXT: .LBB20_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB22_3
-; CHECK-NEXT: .LBB22_3:
+; CHECK-NEXT: s_branch .LBB20_3
+; CHECK-NEXT: .LBB20_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -503,14 +448,14 @@ define amdgpu_cs i32 @branch_uniform_simulated_negated_ballot_ne_zero_and(i32 in
 ; CHECK-NEXT: s_cselect_b32 s1, -1, 0
 ; CHECK-NEXT: s_and_b32 s0, s0, s1
 ; CHECK-NEXT: s_and_b32 s0, s0, exec_lo
-; CHECK-NEXT: s_cbranch_scc1 .LBB23_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB21_2
 ; CHECK-NEXT: ; %bb.1: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB23_3
-; CHECK-NEXT: .LBB23_2: ; %false
+; CHECK-NEXT: s_branch .LBB21_3
+; CHECK-NEXT: .LBB21_2: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB23_3
-; CHECK-NEXT: .LBB23_3:
+; CHECK-NEXT: s_branch .LBB21_3
+; CHECK-NEXT: .LBB21_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -529,14 +474,14 @@ define amdgpu_cs i32 @branch_divergent_simulated_negated_ballot_eq_zero_and(i32
 ; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
 ; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
 ; CHECK-NEXT: s_and_b32 vcc_lo, vcc_lo, s0
-; CHECK-NEXT: s_cbranch_vccnz .LBB24_2
+; CHECK-NEXT: s_cbranch_vccnz .LBB22_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB24_3
-; CHECK-NEXT: .LBB24_2: ; %true
+; CHECK-NEXT: s_branch .LBB22_3
+; CHECK-NEXT: .LBB22_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB24_3
-; CHECK-NEXT: .LBB24_3:
+; CHECK-NEXT: s_branch .LBB22_3
+; CHECK-NEXT: .LBB22_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
@@ -558,14 +503,14 @@ define amdgpu_cs i32 @branch_uniform_simulated_negated_ballot_eq_zero_and(i32 in
 ; CHECK-NEXT: s_cselect_b32 s1, -1, 0
 ; CHECK-NEXT: s_and_b32 s0, s0, s1
 ; CHECK-NEXT: s_and_b32 s0, s0, exec_lo
-; CHECK-NEXT: s_cbranch_scc1 .LBB25_2
+; CHECK-NEXT: s_cbranch_scc1 .LBB23_2
 ; CHECK-NEXT: ; %bb.1: ; %false
 ; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB25_3
-; CHECK-NEXT: .LBB25_2: ; %true
+; CHECK-NEXT: s_branch .LBB23_3
+; CHECK-NEXT: .LBB23_2: ; %true
 ; CHECK-NEXT: s_mov_b32 s0, 42
-; CHECK-NEXT: s_branch .LBB25_3
-; CHECK-NEXT: .LBB25_3:
+; CHECK-NEXT: s_branch .LBB23_3
+; CHECK-NEXT: .LBB23_3:
   %v1c = icmp ult i32 %v1, 12
   %v2c = icmp ugt i32 %v2, 34
   %c = and i1 %v1c, %v2c
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.wave32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.wave32.ll
index 109a345d7a2c8..04a993eac82cd 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.wave32.ll
@@ -104,3 +104,73 @@ define amdgpu_cs i64 @ctpop_of_ballot(float %x, float %y) {
   %bcnt = call i64 @llvm.ctpop.i64(i64 %ballot)
   ret i64 %bcnt
 }
+
+define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_compare(i32 %v) {
+; CHECK-LABEL: branch_divergent_ballot64_ne_zero_compare:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_cmp_gt_u32_e64 s0, 12, v0
+; CHECK-NEXT: s_mov_b32 s1, 0
+; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB7_2
+; CHECK-NEXT: ; %bb.1: ; %true
+; CHECK-NEXT: s_mov_b32 s0, 42
+; CHECK-NEXT: s_branch .LBB7_3
+; CHECK-NEXT: .LBB7_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB7_3
+; CHECK-NEXT: .LBB7_3:
+  %c = icmp ult i32 %v, 12
+  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
+  %ballot_ne_zero = icmp ne i64 %ballot, 0
+  br i1 %ballot_ne_zero, label %true, label %false
+true:
+  ret i32 42
+false:
+  ret i32 33
+}
+
+define amdgpu_cs i32 @branch_divergent_ballot64_ne_zero_and(i32 %v1, i32 %v2) {
+; DAGISEL-LABEL: branch_divergent_ballot64_ne_zero_and:
+; DAGISEL: ; %bb.0:
+; DAGISEL-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
+; DAGISEL-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
+; DAGISEL-NEXT: s_mov_b32 s1, 0
+; DAGISEL-NEXT: s_and_b32 s0, vcc_lo, s0
+; DAGISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; DAGISEL-NEXT: v_cmp_ne_u32_e64 s0, 0, v0
+; DAGISEL-NEXT: s_cmp_eq_u64 s[0:1], 0
+; DAGISEL-NEXT: s_cbranch_scc1 .LBB8_2
+; DAGISEL-NEXT: ; %bb.1: ; %true
+; DAGISEL-NEXT: s_mov_b32 s0, 42
+; DAGISEL-NEXT: s_branch .LBB8_3
+; DAGISEL-NEXT: .LBB8_2: ; %false
+; DAGISEL-NEXT: s_mov_b32 s0, 33
+; DAGISEL-NEXT: s_branch .LBB8_3
+; DAGISEL-NEXT: .LBB8_3:
+;
+; GISEL-LABEL: branch_divergent_ballot64_ne_zero_and:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
+; GISEL-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
+; GISEL-NEXT: s_mov_b32 s1, 0
+; GISEL-NEXT: s_and_b32 s0, vcc_lo, s0
+; GISEL-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GISEL-NEXT: s_cbranch_scc1 .LBB8_2
+; GISEL-NEXT: ; %bb.1: ; %true
+; GISEL-NEXT: s_mov_b32 s0, 42
+; GISEL-NEXT: s_branch .LBB8_3
+; GISEL-NEXT: .LBB8_2: ; %false
+; GISEL-NEXT: s_mov_b32 s0, 33
+; GISEL-NEXT: s_branch .LBB8_3
+; GISEL-NEXT: .LBB8_3:
+  %v1c = icmp ult i32 %v1, 12
+  %v2c = icmp ugt i32 %v2, 34
+  %c = and i1 %v1c, %v2c
+  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
+  %ballot_ne_zero = icmp ne i64 %ballot, 0
+  br i1 %ballot_ne_zero, label %true, label %false
+true:
+  ret i32 42
+false:
+  ret i32 33
+}