Skip to content

Commit bdec5bf

Browse files
authored
[AMDGPU][GlobalISel] Combine (or s64, zext(s32)) (#151519)
If we only deal with one part of a 64-bit value, we can just generate a merge and unmerge, which will either be combined away or selected into a copy / mov_b32.
1 parent 9824930 commit bdec5bf

15 files changed

+703
-825
lines changed

llvm/lib/Target/AMDGPU/AMDGPUCombine.td

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,19 @@ def binop_s64_with_s32_mask_combines : GICombineGroup<[
176176
combine_or_s64_with_s32_mask, combine_and_s64_with_s32_mask
177177
]>;
178178

179+
// (or i64:x, (zext i32:y)) -> i64:(merge (or lo_32(x), i32:y), hi_32(x))
180+
// (or (zext i32:y), i64:x) -> i64:(merge (or lo_32(x), i32:y), hi_32(x))
181+
def or_s64_zext_s32_frag : GICombinePatFrag<(outs root:$dst), (ins $src_s64, $src_s32),
182+
[(pattern (G_OR $dst, i64:$src_s64, i64:$zext_val), (G_ZEXT i64:$zext_val, i32:$src_s32)),
183+
(pattern (G_OR $dst, i64:$zext_val, i64:$src_s64), (G_ZEXT i64:$zext_val, i32:$src_s32))]>;
184+
185+
def combine_or_s64_s32 : GICombineRule<
186+
(defs root:$dst),
187+
(match (or_s64_zext_s32_frag $dst, i64:$x, i32:$y):$dst),
188+
(apply (G_UNMERGE_VALUES $x_lo, $x_hi, $x),
189+
(G_OR $or, $x_lo, $y),
190+
(G_MERGE_VALUES $dst, $or, $x_hi))>;
191+
179192
let Predicates = [Has16BitInsts, NotHasMed3_16] in {
180193
// For gfx8, expand f16-fmed3-as-f32 into a min/max f16 sequence. This
181194
// saves one instruction compared to the promotion.
@@ -206,7 +219,7 @@ def AMDGPUPreLegalizerCombiner: GICombiner<
206219
"AMDGPUPreLegalizerCombinerImpl",
207220
[all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16,
208221
foldable_fneg, combine_shuffle_vector_to_build_vector,
209-
binop_s64_with_s32_mask_combines]> {
222+
binop_s64_with_s32_mask_combines, combine_or_s64_s32]> {
210223
let CombineAllMethodName = "tryCombineAllImpl";
211224
}
212225

@@ -215,7 +228,7 @@ def AMDGPUPostLegalizerCombiner: GICombiner<
215228
[all_combines, gfx6gfx7_combines, gfx8_combines, combine_fmul_with_select_to_fldexp,
216229
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
217230
rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64,
218-
binop_s64_with_s32_mask_combines]> {
231+
binop_s64_with_s32_mask_combines, combine_or_s64_s32]> {
219232
let CombineAllMethodName = "tryCombineAllImpl";
220233
}
221234

llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll

Lines changed: 10 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1778,7 +1778,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
17781778
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v1
17791779
; GFX6-NEXT: v_lshl_b64 v[0:1], v[1:2], 31
17801780
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 1, v3
1781-
; GFX6-NEXT: v_or_b32_e32 v0, v3, v0
1781+
; GFX6-NEXT: v_or_b32_e32 v0, v0, v3
17821782
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 1, v2
17831783
; GFX6-NEXT: s_setpc_b64 s[30:31]
17841784
;
@@ -1790,7 +1790,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
17901790
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v1
17911791
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
17921792
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 1, v3
1793-
; GFX8-NEXT: v_or_b32_e32 v0, v3, v0
1793+
; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
17941794
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 1, v2
17951795
; GFX8-NEXT: s_setpc_b64 s[30:31]
17961796
;
@@ -1802,7 +1802,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
18021802
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
18031803
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
18041804
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 1, v3
1805-
; GFX9-NEXT: v_or_b32_e32 v0, v3, v0
1805+
; GFX9-NEXT: v_or_b32_e32 v0, v0, v3
18061806
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 1, v2
18071807
; GFX9-NEXT: s_setpc_b64 s[30:31]
18081808
;
@@ -1815,7 +1815,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
18151815
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v2, 31, v1
18161816
; GFX10PLUS-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
18171817
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v2, 1, v2
1818-
; GFX10PLUS-NEXT: v_or_b32_e32 v0, v3, v0
1818+
; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v3
18191819
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
18201820
%result = ashr i65 %value, 33
18211821
ret i65 %result
@@ -1875,21 +1875,19 @@ define amdgpu_ps i65 @s_ashr_i65_33(i65 inreg %value) {
18751875
; GCN-LABEL: s_ashr_i65_33:
18761876
; GCN: ; %bb.0:
18771877
; GCN-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
1878-
; GCN-NEXT: s_lshr_b32 s0, s1, 1
1879-
; GCN-NEXT: s_mov_b32 s1, 0
1880-
; GCN-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
1881-
; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
1878+
; GCN-NEXT: s_lshr_b32 s4, s1, 1
1879+
; GCN-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
1880+
; GCN-NEXT: s_or_b32 s0, s0, s4
18821881
; GCN-NEXT: s_ashr_i32 s2, s3, 1
18831882
; GCN-NEXT: ; return to shader part epilog
18841883
;
18851884
; GFX10PLUS-LABEL: s_ashr_i65_33:
18861885
; GFX10PLUS: ; %bb.0:
18871886
; GFX10PLUS-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
1888-
; GFX10PLUS-NEXT: s_lshr_b32 s0, s1, 1
1889-
; GFX10PLUS-NEXT: s_mov_b32 s1, 0
1890-
; GFX10PLUS-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
1887+
; GFX10PLUS-NEXT: s_lshr_b32 s4, s1, 1
1888+
; GFX10PLUS-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
18911889
; GFX10PLUS-NEXT: s_ashr_i32 s2, s3, 1
1892-
; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
1890+
; GFX10PLUS-NEXT: s_or_b32 s0, s0, s4
18931891
; GFX10PLUS-NEXT: ; return to shader part epilog
18941892
%result = ashr i65 %value, 33
18951893
ret i65 %result
Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
2+
# RUN: llc -mtriple=amdgcn -mcpu=tahiti -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck %s
3+
4+
---
5+
name: test_combine_or_s64_s32
6+
tracksRegLiveness: true
7+
body: |
8+
bb.0:
9+
liveins: $sgpr0_sgpr1, $sgpr2
10+
; CHECK-LABEL: name: test_combine_or_s64_s32
11+
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
12+
; CHECK-NEXT: {{ $}}
13+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
14+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
15+
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
16+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[COPY1]]
17+
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[UV1]](s32)
18+
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
19+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
20+
%0:_(s64) = COPY $sgpr0_sgpr1
21+
%1:_(s32) = COPY $sgpr2
22+
%2:_(s64) = G_ZEXT %1(s32)
23+
%3:_(s64) = G_OR %0, %2
24+
$sgpr0_sgpr1 = COPY %3(s64)
25+
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
26+
...
27+
---
28+
name: test_combine_or_s64_s32_rhs
29+
tracksRegLiveness: true
30+
body: |
31+
bb.0:
32+
liveins: $sgpr0_sgpr1, $sgpr2
33+
; CHECK-LABEL: name: test_combine_or_s64_s32_rhs
34+
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
35+
; CHECK-NEXT: {{ $}}
36+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
37+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
38+
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
39+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[COPY1]]
40+
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[UV1]](s32)
41+
; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
42+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
43+
%0:_(s64) = COPY $sgpr0_sgpr1
44+
%1:_(s32) = COPY $sgpr2
45+
%2:_(s64) = G_ZEXT %1(s32)
46+
%3:_(s64) = G_OR %2, %0
47+
$sgpr0_sgpr1 = COPY %3(s64)
48+
SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
49+
...
50+
---
51+
name: test_combine_or_s64_s32_merge_unmerge
52+
tracksRegLiveness: true
53+
body: |
54+
bb.0:
55+
liveins: $sgpr0, $sgpr1, $sgpr2
56+
; CHECK-LABEL: name: test_combine_or_s64_s32_merge_unmerge
57+
; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
58+
; CHECK-NEXT: {{ $}}
59+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
60+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
61+
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
62+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY2]]
63+
; CHECK-NEXT: $sgpr0 = COPY [[OR]](s32)
64+
; CHECK-NEXT: $sgpr1 = COPY [[COPY1]](s32)
65+
; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
66+
%0:_(s32) = COPY $sgpr0
67+
%1:_(s32) = COPY $sgpr1
68+
%2:_(s32) = COPY $sgpr2
69+
%3:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
70+
%4:_(s64) = G_ZEXT %2(s32)
71+
%5:_(s64) = G_OR %3, %4
72+
%6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %5(s64)
73+
$sgpr0 = COPY %6(s32)
74+
$sgpr1 = COPY %7(s32)
75+
SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
76+
...
77+
---
78+
name: negative_test_incorrect_types
79+
tracksRegLiveness: true
80+
body: |
81+
bb.0:
82+
liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
83+
; CHECK-LABEL: name: negative_test_incorrect_types
84+
; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
85+
; CHECK-NEXT: {{ $}}
86+
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
87+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
88+
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s64)
89+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s128) = G_OR [[COPY]], [[ZEXT]]
90+
; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[OR]](s128)
91+
%0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
92+
%1:_(s64) = COPY $vgpr4_vgpr5
93+
%2:_(s128) = G_ZEXT %1
94+
%3:_(s128) = G_OR %0, %2
95+
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
96+
...
97+

llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -227,39 +227,38 @@ exit:
227227
define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 x i32> inreg %.WorkgroupId, <3 x i32> %.LocalInvocationId) #0 {
228228
; GFX10-LABEL: single_lane_execution_attribute:
229229
; GFX10: ; %bb.0: ; %.entry
230-
; GFX10-NEXT: s_getpc_b64 s[12:13]
231-
; GFX10-NEXT: s_mov_b32 s12, 0
230+
; GFX10-NEXT: s_getpc_b64 s[4:5]
232231
; GFX10-NEXT: s_mov_b32 s2, s0
233-
; GFX10-NEXT: s_mov_b32 s3, s12
232+
; GFX10-NEXT: s_mov_b32 s3, s5
234233
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0
235-
; GFX10-NEXT: s_or_b64 s[2:3], s[12:13], s[2:3]
236234
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x0
237235
; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1
238236
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1
239237
; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
240238
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
241239
; GFX10-NEXT: s_xor_b32 s2, vcc_lo, exec_lo
242-
; GFX10-NEXT: s_and_b32 vcc_lo, s2, exec_lo
243240
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
244241
; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen
242+
; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
243+
; GFX10-NEXT: s_mov_b32 s2, 0
245244
; GFX10-NEXT: s_waitcnt vmcnt(0)
246245
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
247246
; GFX10-NEXT: s_cbranch_vccnz .LBB4_4
248247
; GFX10-NEXT: ; %bb.1: ; %.preheader.preheader
249-
; GFX10-NEXT: s_mov_b32 s2, 0
248+
; GFX10-NEXT: s_mov_b32 s3, 0
250249
; GFX10-NEXT: .LBB4_2: ; %.preheader
251250
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
252-
; GFX10-NEXT: v_mov_b32_e32 v3, s12
251+
; GFX10-NEXT: v_mov_b32_e32 v3, s2
253252
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
254-
; GFX10-NEXT: s_add_i32 s12, s12, 4
253+
; GFX10-NEXT: s_add_i32 s2, s2, 4
255254
; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen
256255
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
257256
; GFX10-NEXT: s_waitcnt vmcnt(0)
258-
; GFX10-NEXT: v_readfirstlane_b32 s3, v3
259-
; GFX10-NEXT: s_add_i32 s2, s3, s2
257+
; GFX10-NEXT: v_readfirstlane_b32 s12, v3
258+
; GFX10-NEXT: s_add_i32 s3, s12, s3
260259
; GFX10-NEXT: s_cbranch_vccnz .LBB4_2
261260
; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge
262-
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s2, v2
261+
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2
263262
; GFX10-NEXT: s_or_b32 s2, s0, vcc_lo
264263
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
265264
; GFX10-NEXT: s_branch .LBB4_6

0 commit comments

Comments
 (0)