174 changes: 87 additions & 87 deletions llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
@@ -87,23 +87,23 @@ define amdgpu_kernel void @v_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; VI-LABEL: v_lshr_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v5, v[0:1]
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v1, v[4:5]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b16_e32 v4, v1, v0
; VI-NEXT: v_lshrrev_b16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v0, v4, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshrrev_b16_e32 v3, v2, v5
; VI-NEXT: v_lshrrev_b16_sdwa v2, v2, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v2, v3, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: v_lshr_v2i16:
@@ -117,14 +117,14 @@ define amdgpu_kernel void @v_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v3
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_and_b32_e32 v3, s8, v3
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_lshr_b32_e32 v2, v2, v3
; CI-NEXT: v_lshr_b32_e32 v3, v4, v5
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@@ -171,39 +171,39 @@ define amdgpu_kernel void @lshr_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b16_e32 v1, s0, v0
; VI-NEXT: v_lshrrev_b16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshrrev_b16_e32 v4, s0, v3
; VI-NEXT: v_lshrrev_b16_sdwa v2, v2, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v2, v4, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: lshr_v_s_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dword s0, s[0:1], 0xd
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_load_dword s8, s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshr_b32 s9, s0, 16
; CI-NEXT: s_and_b32 s10, s0, s8
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_and_b32 s8, s8, s0
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, s9, v3
; CI-NEXT: v_lshrrev_b32_e32 v2, s10, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, s1, v3
; CI-NEXT: v_lshrrev_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@@ -246,39 +246,39 @@ define amdgpu_kernel void @lshr_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b16_e64 v1, v0, s0
; VI-NEXT: v_lshrrev_b16_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshrrev_b16_e64 v4, v3, s0
; VI-NEXT: v_lshrrev_b16_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v2, v4, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: lshr_s_v_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dword s0, s[0:1], 0xd
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_load_dword s8, s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshr_b32 s9, s0, 16
; CI-NEXT: s_and_b32 s10, s0, s8
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_and_b32 s8, s8, s0
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_lshr_b32_e32 v3, s9, v3
; CI-NEXT: v_lshr_b32_e32 v2, s10, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_lshr_b32_e32 v3, s1, v3
; CI-NEXT: v_lshr_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@@ -320,15 +320,15 @@ define amdgpu_kernel void @lshr_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b16_e64 v1, v0, 8
; VI-NEXT: v_lshrrev_b16_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshrrev_b16_e64 v2, v3, 8
; VI-NEXT: v_lshrrev_b16_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v2, v2, v3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: lshr_imm_v_v2i16:
@@ -428,45 +428,45 @@ define amdgpu_kernel void @v_lshr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16>
; GFX9-LABEL: v_lshr_v4i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s2, v2
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s2, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:8
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_lshrrev_b16 v1, v1, v5
; GFX9-NEXT: v_pk_lshrrev_b16 v0, v0, v4
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: v_pk_lshrrev_b16 v1, v1, v3
; GFX9-NEXT: v_pk_lshrrev_b16 v0, v0, v2
; GFX9-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: s_endpgm
;
; VI-LABEL: v_lshr_v4i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; VI-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, 8, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 8, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_add_u32_e32 v4, vcc, s0, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshrrev_b16_e32 v6, v5, v1
; VI-NEXT: v_lshrrev_b16_sdwa v1, v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_lshrrev_b16_e32 v5, v4, v0
; VI-NEXT: v_lshrrev_b16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_lshrrev_b16_e32 v6, v3, v1
; VI-NEXT: v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_lshrrev_b16_e32 v3, v2, v0
; VI-NEXT: v_lshrrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v1, v6, v1
; VI-NEXT: v_or_b32_e32 v0, v5, v0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: v_or_b32_e32 v0, v3, v0
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
;
; CI-LABEL: v_lshr_v4i16:
@@ -480,18 +480,18 @@ define amdgpu_kernel void @v_lshr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16>
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
; CI-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; CI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
; CI-NEXT: v_lshrrev_b32_e32 v9, 16, v5
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_and_b32_e32 v4, s8, v4
; CI-NEXT: v_and_b32_e32 v3, s8, v3
; CI-NEXT: v_and_b32_e32 v5, s8, v5
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_and_b32_e32 v4, s0, v4
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_and_b32_e32 v5, s0, v5
; CI-NEXT: v_lshr_b32_e32 v3, v3, v5
; CI-NEXT: v_lshr_b32_e32 v5, v7, v9
; CI-NEXT: v_lshr_b32_e32 v2, v2, v4
@@ -565,13 +565,13 @@ define amdgpu_kernel void @lshr_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s8, 0xff00ff
; CI-NEXT: s_mov_b32 s0, 0xff00ff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 8, v3
; CI-NEXT: v_lshrrev_b32_e32 v2, 8, v2
; CI-NEXT: v_and_b32_e32 v3, s8, v3
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
; CI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
4 changes: 3 additions & 1 deletion llvm/test/CodeGen/AMDGPU/madak.ll
@@ -39,14 +39,16 @@ define amdgpu_kernel void @madak_f32(float addrspace(1)* noalias %out, float add
; it.

; GCN-LABEL: {{^}}madak_2_use_f32:
; GFX8_9_10: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GFX9: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GFX10: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GFX6-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GFX6-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; GFX6-DAG: buffer_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8
; GFX8_9_10: {{flat|global}}_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}
; GFX8_9_10: {{flat|global}}_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}
; GFX8_9_10: {{flat|global}}_load_dword [[VC:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}
; GFX6-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GFX8-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
; GFX6_8_9-DAG: v_madak_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
; GFX10-MAD-DAG:v_madak_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
; FMA-DAG: v_fmaak_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
107 changes: 54 additions & 53 deletions llvm/test/CodeGen/AMDGPU/max.i16.ll
@@ -73,16 +73,16 @@ define amdgpu_kernel void @v_test_imax_sge_v2i16(<2 x i16> addrspace(1)* %out, <
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v4
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v1, v[2:3]
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: v_add_u32_e32 v4, vcc, s4, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: flat_load_dword v5, v[0:1]
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_max_i16_e32 v2, v0, v1
; VI-NEXT: v_max_i16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v0, v2, v0
; VI-NEXT: flat_store_dword v[4:5], v0
; VI-NEXT: v_max_i16_e32 v3, v5, v2
; VI-NEXT: v_max_i16_sdwa v2, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v2, v3, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: v_test_imax_sge_v2i16:
@@ -124,63 +124,64 @@ define amdgpu_kernel void @v_test_imax_sge_v3i16(<3 x i16> addrspace(1)* %out, <
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; VI-NEXT: v_lshlrev_b32_e32 v6, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v4
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v6
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v4
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v6
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: v_add_u32_e32 v4, vcc, s4, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: v_add_u32_e32 v6, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ushort v6, v[6:7]
; VI-NEXT: flat_load_dword v7, v[0:1]
; VI-NEXT: v_add_u32_e32 v4, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ushort v4, v[4:5]
; VI-NEXT: flat_load_dword v5, v[0:1]
; VI-NEXT: v_add_u32_e32 v0, vcc, 4, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; VI-NEXT: flat_load_ushort v0, v[0:1]
; VI-NEXT: flat_load_dword v8, v[2:3]
; VI-NEXT: v_add_u32_e32 v2, vcc, 4, v4
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
; VI-NEXT: flat_load_dword v7, v[2:3]
; VI-NEXT: flat_load_ushort v8, v[0:1]
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v6
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
; VI-NEXT: v_max_i16_e32 v0, v6, v0
; VI-NEXT: v_max_i16_e32 v6, v5, v7
; VI-NEXT: v_max_i16_sdwa v5, v5, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_max_i16_e32 v1, v7, v8
; VI-NEXT: v_max_i16_sdwa v7, v7, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v1, v1, v7
; VI-NEXT: flat_store_short v[2:3], v0
; VI-NEXT: flat_store_dword v[4:5], v1
; VI-NEXT: v_max_i16_e32 v4, v4, v8
; VI-NEXT: v_or_b32_e32 v5, v6, v5
; VI-NEXT: flat_store_short v[2:3], v4
; VI-NEXT: flat_store_dword v[0:1], v5
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: v_test_imax_sge_v3i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 3, v0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: v_mov_b32_e32 v7, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v4
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v5
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v4
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v5
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: global_load_short_d16 v7, v[0:1], off offset:4
; GFX9-NEXT: global_load_dword v0, v[0:1], off
; GFX9-NEXT: global_load_short_d16 v6, v[2:3], off offset:4
; GFX9-NEXT: global_load_dword v1, v[2:3], off
; GFX9-NEXT: v_mov_b32_e32 v5, s5
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s4, v4
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT: global_load_short_d16 v6, v[0:1], off offset:4
; GFX9-NEXT: global_load_dword v7, v[0:1], off
; GFX9-NEXT: global_load_short_d16 v4, v[2:3], off offset:4
; GFX9-NEXT: global_load_dword v2, v[2:3], off
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v5
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_pk_max_i16 v3, v6, v4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_max_i16 v0, v0, v1
; GFX9-NEXT: v_pk_max_i16 v1, v7, v6
; GFX9-NEXT: global_store_short v[4:5], v1, off offset:4
; GFX9-NEXT: global_store_dword v[4:5], v0, off
; GFX9-NEXT: v_pk_max_i16 v2, v7, v2
; GFX9-NEXT: global_store_short v[0:1], v3, off offset:4
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%gep0 = getelementptr <3 x i16>, <3 x i16> addrspace(1)* %aptr, i32 %tid
@@ -441,16 +442,16 @@ define amdgpu_kernel void @v_test_umax_ugt_v2i16(<2 x i16> addrspace(1)* %out, <
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v4
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v1, v[2:3]
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: v_add_u32_e32 v4, vcc, s4, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: flat_load_dword v5, v[0:1]
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_max_u16_e32 v2, v0, v1
; VI-NEXT: v_max_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v0, v2, v0
; VI-NEXT: flat_store_dword v[4:5], v0
; VI-NEXT: v_max_u16_e32 v3, v5, v2
; VI-NEXT: v_max_u16_sdwa v2, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v2, v3, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: v_test_umax_ugt_v2i16:
86 changes: 48 additions & 38 deletions llvm/test/CodeGen/AMDGPU/memory_clause.ll
@@ -108,46 +108,56 @@ define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %ar
; GCN-NEXT: v_and_b32_e32 v2, 0x3ff, v2
; GCN-NEXT: v_lshlrev_b32_e32 v2, 4, v2
; GCN-NEXT: v_add_u32_e32 v0, v0, v2
; GCN-NEXT: buffer_load_dword v3, v0, s[0:3], 0 offen
; GCN-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen offset:4
; GCN-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen offset:8
; GCN-NEXT: buffer_load_dword v6, v0, s[0:3], 0 offen offset:12
; GCN-NEXT: buffer_load_dword v7, v0, s[0:3], 0 offen offset:16
; GCN-NEXT: buffer_load_dword v8, v0, s[0:3], 0 offen offset:20
; GCN-NEXT: buffer_load_dword v9, v0, s[0:3], 0 offen offset:24
; GCN-NEXT: buffer_load_dword v10, v0, s[0:3], 0 offen offset:28
; GCN-NEXT: buffer_load_dword v11, v0, s[0:3], 0 offen offset:32
; GCN-NEXT: buffer_load_dword v12, v0, s[0:3], 0 offen offset:36
; GCN-NEXT: buffer_load_dword v13, v0, s[0:3], 0 offen offset:40
; GCN-NEXT: buffer_load_dword v14, v0, s[0:3], 0 offen offset:44
; GCN-NEXT: buffer_load_dword v15, v0, s[0:3], 0 offen offset:48
; GCN-NEXT: buffer_load_dword v16, v0, s[0:3], 0 offen offset:52
; GCN-NEXT: buffer_load_dword v17, v0, s[0:3], 0 offen offset:56
; GCN-NEXT: v_add_u32_e32 v1, v1, v2
; GCN-NEXT: buffer_load_dword v6, v0, s[0:3], 0 offen offset:20
; GCN-NEXT: buffer_load_dword v7, v0, s[0:3], 0 offen offset:24
; GCN-NEXT: buffer_load_dword v8, v0, s[0:3], 0 offen offset:28
; GCN-NEXT: buffer_load_dword v9, v0, s[0:3], 0 offen offset:32
; GCN-NEXT: buffer_load_dword v10, v0, s[0:3], 0 offen offset:36
; GCN-NEXT: buffer_load_dword v11, v0, s[0:3], 0 offen offset:40
; GCN-NEXT: buffer_load_dword v12, v0, s[0:3], 0 offen offset:44
; GCN-NEXT: buffer_load_dword v13, v0, s[0:3], 0 offen offset:48
; GCN-NEXT: buffer_load_dword v14, v0, s[0:3], 0 offen offset:52
; GCN-NEXT: buffer_load_dword v15, v0, s[0:3], 0 offen offset:56
; GCN-NEXT: buffer_load_dword v16, v0, s[0:3], 0 offen offset:60
; GCN-NEXT: buffer_load_dword v2, v0, s[0:3], 0 offen
; GCN-NEXT: buffer_load_dword v3, v0, s[0:3], 0 offen offset:4
; GCN-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen offset:8
; GCN-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen offset:12
; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:60
; GCN-NEXT: s_nop 0
; GCN-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:16
; GCN-NEXT: s_nop 0
; GCN-NEXT: s_waitcnt vmcnt(4)
; GCN-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(4)
; GCN-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen offset:4
; GCN-NEXT: s_waitcnt vmcnt(4)
; GCN-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen offset:8
; GCN-NEXT: s_waitcnt vmcnt(4)
; GCN-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen offset:12
; GCN-NEXT: s_waitcnt vmcnt(4)
; GCN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen offset:16
; GCN-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen offset:20
; GCN-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen offset:24
; GCN-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen offset:28
; GCN-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen offset:32
; GCN-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen offset:36
; GCN-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen offset:40
; GCN-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen offset:44
; GCN-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen offset:48
; GCN-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen offset:52
; GCN-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen offset:56
; GCN-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen offset:60
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen offset:4
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen offset:8
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen offset:12
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen offset:16
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen offset:20
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen offset:24
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen offset:28
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen offset:32
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen offset:36
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen offset:40
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen offset:44
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen offset:48
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen offset:52
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v17, v1, s[0:3], 0 offen offset:56
; GCN-NEXT: s_waitcnt vmcnt(15)
; GCN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen offset:60
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
bb:
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
@@ -57,7 +57,7 @@ define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, float addrspace(3
; GFX9-NEXT: v_and_b32_e32 v5, 1, v18
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5
; GFX9-NEXT: v_cmp_lt_u32_e64 s[4:5], v0, v1
; GFX9-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
; GFX9-NEXT: s_cbranch_execz BB1_3
; GFX9-NEXT: ; %bb.1: ; %bb19
; GFX9-NEXT: v_cvt_f32_u32_e32 v7, v6
@@ -67,7 +67,7 @@ define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, float addrspace(3
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v6, 2, v2
; GFX9-NEXT: v_add_u32_e32 v7, v17, v12
; GFX9-NEXT: s_mov_b64 s[12:13], 0
; GFX9-NEXT: s_mov_b64 s[10:11], 0
; GFX9-NEXT: BB1_2: ; %bb23
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cvt_f32_u32_e32 v8, v0
@@ -76,32 +76,32 @@ define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, float addrspace(3
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: v_madak_f32 v8, v8, v4, 0x3727c5ac
; GFX9-NEXT: v_cvt_u32_f32_e32 v8, v8
; GFX9-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v1
; GFX9-NEXT: v_mul_u32_u24_e32 v18, v8, v5
; GFX9-NEXT: v_add_u32_e32 v8, v8, v16
; GFX9-NEXT: v_cmp_lt_u32_e64 s[6:7], v8, v13
; GFX9-NEXT: v_cmp_lt_u32_e64 s[4:5], v8, v13
; GFX9-NEXT: v_mul_lo_u32 v8, v8, v15
; GFX9-NEXT: v_sub_u32_e32 v19, v9, v18
; GFX9-NEXT: v_cmp_lt_u32_e64 s[8:9], v19, v14
; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], s[8:9]
; GFX9-NEXT: v_cmp_lt_u32_e64 s[6:7], v19, v14
; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GFX9-NEXT: v_sub_u32_e32 v12, v12, v18
; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], vcc
; GFX9-NEXT: v_add_u32_e32 v8, v12, v8
; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; GFX9-NEXT: v_lshlrev_b64 v[8:9], 2, v[8:9]
; GFX9-NEXT: s_or_b64 s[12:13], s[4:5], s[12:13]
; GFX9-NEXT: v_add_co_u32_e64 v8, s[4:5], v10, v8
; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], v11, v9, s[4:5]
; GFX9-NEXT: v_add_co_u32_e64 v8, s[6:7], v10, v8
; GFX9-NEXT: v_addc_co_u32_e64 v9, s[6:7], v11, v9, s[6:7]
; GFX9-NEXT: global_load_dword v8, v[8:9], off
; GFX9-NEXT: v_cmp_ge_u32_e64 s[6:7], v0, v1
; GFX9-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; GFX9-NEXT: ds_write_b32 v3, v8
; GFX9-NEXT: v_add_u32_e32 v3, v3, v6
; GFX9-NEXT: s_andn2_b64 exec, exec, s[12:13]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_cbranch_execnz BB1_2
; GFX9-NEXT: BB1_3: ; %Flow3
; GFX9-NEXT: s_or_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
bb:
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -98,17 +98,17 @@ define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off
;
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off
; GFX10: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
entry:
%call = tail call i64 @_Z13get_global_idj(i32 0)
%conv = and i64 %call, 255
212 changes: 103 additions & 109 deletions llvm/test/CodeGen/AMDGPU/saddo.ll
@@ -166,40 +166,38 @@ define amdgpu_kernel void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
; SI-NEXT: s_mov_b32 s14, s10
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: s_mov_b32 s12, s4
; SI-NEXT: s_mov_b32 s13, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: buffer_load_dword v0, off, s[0:3], 0
; SI-NEXT: buffer_load_dword v0, off, s[12:15], 0
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v2, vcc, v1, v0
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v0
; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: buffer_store_dword v2, off, s[8:11], 0
; SI-NEXT: buffer_store_byte v0, off, s[12:15], 0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: v_saddo_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: v_mov_b32_e32 v6, s6
; VI-NEXT: v_mov_b32_e32 v7, s7
; VI-NEXT: flat_load_dword v4, v[4:5]
; VI-NEXT: flat_load_dword v5, v[6:7]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: flat_load_dword v5, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
@@ -218,12 +216,12 @@ define amdgpu_kernel void @v_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s4
; GFX9-NEXT: v_mov_b32_e32 v5, s5
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: v_mov_b32_e32 v7, s7
; GFX9-NEXT: global_load_dword v4, v[4:5], off
; GFX9-NEXT: global_load_dword v5, v[6:7], off
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: global_load_dword v5, v[2:3], off
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s2
@@ -335,20 +333,18 @@ define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
; SI-NEXT: s_mov_b32 s14, s10
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: s_mov_b32 s12, s4
; SI-NEXT: s_mov_b32 s13, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v4, vcc, v0, v2
; SI-NEXT: v_addc_u32_e32 v5, vcc, v1, v3, vcc
Expand All @@ -357,57 +353,57 @@ define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[8:11], 0
; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: buffer_store_byte v0, off, s[12:15], 0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: v_saddo_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: v_mov_b32_e32 v6, s6
; VI-NEXT: v_mov_b32_e32 v7, s7
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v8, vcc, v4, v6
; VI-NEXT: v_addc_u32_e32 v9, vcc, v5, v7, vcc
; VI-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[6:7]
; VI-NEXT: v_cmp_lt_i64_e64 s[0:1], v[8:9], v[4:5]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[8:9]
; VI-NEXT: v_add_u32_e32 v8, vcc, v0, v2
; VI-NEXT: v_addc_u32_e32 v9, vcc, v1, v3, vcc
; VI-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[2:3]
; VI-NEXT: v_cmp_lt_i64_e64 s[0:1], v[8:9], v[0:1]
; VI-NEXT: flat_store_dwordx2 v[4:5], v[8:9]
; VI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: flat_store_byte v[6:7], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: v_saddo_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s4
; GFX9-NEXT: v_mov_b32_e32 v5, s5
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: v_mov_b32_e32 v7, s7
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[4:5], off
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[6:7], off
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: v_mov_b32_e32 v4, s0
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_mov_b32_e32 v6, s2
; GFX9-NEXT: v_mov_b32_e32 v7, s3
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v4, v6
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v5, v7, vcc
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[6:7]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[8:9], v[4:5]
; GFX9-NEXT: global_store_dwordx2 v[0:1], v[8:9], off
; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v1, v3, vcc
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[2:3]
; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], v[8:9], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v[4:5], v[8:9], off
; GFX9-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT: global_store_byte v[2:3], v0, off
; GFX9-NEXT: global_store_byte v[6:7], v0, off
; GFX9-NEXT: s_endpgm
%a = load i64, i64 addrspace(1)* %aptr, align 4
%b = load i64, i64 addrspace(1)* %bptr, align 4
Expand All @@ -428,20 +424,18 @@ define amdgpu_kernel void @v_saddo_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
; SI-NEXT: s_mov_b32 s14, s10
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: s_mov_b32 s12, s4
; SI-NEXT: s_mov_b32 s13, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v5, vcc, v1, v3
; SI-NEXT: v_add_i32_e32 v4, vcc, v0, v2
@@ -461,58 +455,58 @@ define amdgpu_kernel void @v_saddo_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32>
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: v_mov_b32_e32 v6, s6
; VI-NEXT: v_mov_b32_e32 v7, s7
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: flat_load_dwordx2 v[6:7], v[6:7]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v9, vcc, v5, v7
; VI-NEXT: v_add_u32_e32 v8, vcc, v4, v6
; VI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v7
; VI-NEXT: v_cmp_lt_i32_e64 s[4:5], v9, v5
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v6
; VI-NEXT: v_cmp_lt_i32_e64 s[2:3], v8, v4
; VI-NEXT: v_add_u32_e32 v9, vcc, v1, v3
; VI-NEXT: v_add_u32_e32 v8, vcc, v0, v2
; VI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v3
; VI-NEXT: v_cmp_lt_i32_e64 s[4:5], v9, v1
; VI-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[8:9]
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v2
; VI-NEXT: v_cmp_lt_i32_e64 s[2:3], v8, v0
; VI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; VI-NEXT: s_xor_b64 s[0:1], vcc, s[2:3]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: flat_store_dwordx2 v[4:5], v[8:9]
; VI-NEXT: flat_store_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: v_saddo_v2i32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s4
; GFX9-NEXT: v_mov_b32_e32 v5, s5
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: v_mov_b32_e32 v7, s7
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[4:5], off
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[6:7], off
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: v_mov_b32_e32 v4, s0
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_mov_b32_e32 v6, s2
; GFX9-NEXT: v_mov_b32_e32 v7, s3
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v9, v5, v7
; GFX9-NEXT: v_add_u32_e32 v8, v4, v6
; GFX9-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v7
; GFX9-NEXT: v_cmp_lt_i32_e64 s[4:5], v9, v5
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 0, v6
; GFX9-NEXT: v_cmp_lt_i32_e64 s[2:3], v8, v4
; GFX9-NEXT: v_add_u32_e32 v9, v1, v3
; GFX9-NEXT: v_add_u32_e32 v8, v0, v2
; GFX9-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v3
; GFX9-NEXT: v_cmp_lt_i32_e64 s[4:5], v9, v1
; GFX9-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
; GFX9-NEXT: global_store_dwordx2 v[0:1], v[8:9], off
; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, 0, v2
; GFX9-NEXT: v_cmp_lt_i32_e64 s[2:3], v8, v0
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GFX9-NEXT: s_xor_b64 s[0:1], vcc, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: global_store_dwordx2 v[4:5], v[8:9], off
; GFX9-NEXT: global_store_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT: s_endpgm
%a = load <2 x i32>, <2 x i32> addrspace(1)* %aptr, align 4
%b = load <2 x i32>, <2 x i32> addrspace(1)* %bptr, align 4
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
@@ -173,9 +173,9 @@ entry:
; GCN-NOHSA-DAG: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x9a40{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x9a50{{$}}
; CI-NOHSA-NOT: v_add
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16
; CI-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
@@ -205,8 +205,8 @@ entry:
; SI: s_mov_b32 {{s[0-9]+}}, 0x13480
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:32
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:48
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], {{s[0-9]+}} addr64
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:48
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x13480{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x13490{{$}}
Original file line number Diff line number Diff line change
@@ -36,10 +36,10 @@ body: |
; CHECK: INLINEASM &"", 1 /* sideeffect attdialect */, 851978 /* regdef:VGPR_LO16 */, def dead [[COPY1]], 851978 /* regdef:VGPR_LO16 */, def dead [[COPY]].sub1, 2147483657 /* reguse tiedto:$0 */, [[COPY1]], 2147549193 /* reguse tiedto:$1 */, [[COPY]].sub1
; CHECK: %11.sub0:vreg_512 = COPY [[COPY]].sub0
; CHECK: %11.sub3:vreg_512 = COPY [[COPY]].sub3
; CHECK: dead %10:vgpr_32 = V_ADD_CO_U32_e32 4, [[V_MOV_B32_e32_1]], implicit-def dead $vcc, implicit $exec
; CHECK: %11.sub2:vreg_512 = COPY undef [[V_MOV_B32_e32_]]
; CHECK: %11.sub5:vreg_512 = COPY undef [[V_MOV_B32_e32_]]
; CHECK: [[COPY2:%[0-9]+]]:vreg_512 = COPY %11
; CHECK: dead %10:vgpr_32 = V_ADD_CO_U32_e32 4, [[V_MOV_B32_e32_1]], implicit-def dead $vcc, implicit $exec
; CHECK: S_BRANCH %bb.1
bb.0:
liveins: $sgpr6_sgpr7
Original file line number Diff line number Diff line change
@@ -25,18 +25,18 @@ body: |
; CHECK: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; CHECK: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, 0, 0, implicit $exec
; CHECK: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, 0, 0, implicit $exec
; CHECK: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[GLOBAL_LOAD_DWORDX2_]]
; CHECK: undef %6.sub0:vreg_64 = V_ADD_F32_e32 [[DEF]].sub0, [[COPY1]].sub0, implicit $mode, implicit $exec
; CHECK: dead undef %6.sub1:vreg_64 = V_ADD_F32_e32 [[DEF]].sub1, [[COPY1]].sub0, implicit $mode, implicit $exec
; CHECK: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[COPY1]], 0, 0, 0, 0, implicit $exec
; CHECK: undef %4.sub0:vreg_64 = V_MOV_B32_e32 111, implicit $exec
; CHECK: [[DEF1:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; CHECK: [[DEF3:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; CHECK: undef %11.sub1:vreg_64 = IMPLICIT_DEF
; CHECK: [[DEF4:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK: [[DEF5:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; CHECK: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[GLOBAL_LOAD_DWORDX2_]]
; CHECK: undef %6.sub0:vreg_64 = V_ADD_F32_e32 [[DEF]].sub0, [[COPY1]].sub0, implicit $mode, implicit $exec
; CHECK: dead undef %6.sub1:vreg_64 = V_ADD_F32_e32 [[DEF]].sub1, [[COPY1]].sub0, implicit $mode, implicit $exec
; CHECK: [[DEF6:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; CHECK: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[COPY1]], 0, 0, 0, 0, implicit $exec
; CHECK: undef %19.sub0:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD1]], [[GLOBAL_LOAD_DWORDX2_]].sub0, implicit $mode, implicit $exec
; CHECK: [[DEF7:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; CHECK: %19.sub1:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD]], [[GLOBAL_LOAD_DWORD]], implicit $mode, implicit $exec
236 changes: 115 additions & 121 deletions llvm/test/CodeGen/AMDGPU/sdiv.ll

Large diffs are not rendered by default.

68 changes: 34 additions & 34 deletions llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1867,56 +1867,56 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
define amdgpu_kernel void @s_test_sdiv24_k_num_i64(i64 addrspace(1)* %out, i64 %x) {
; GCN-LABEL: s_test_sdiv24_k_num_i64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_ashr_i64 s[6:7], s[6:7], 40
; GCN-NEXT: v_cvt_f32_i32_e32 v0, s6
; GCN-NEXT: s_mov_b32 s7, 0x41c00000
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_ashr_i32 s4, s6, 30
; GCN-NEXT: s_ashr_i64 s[2:3], s[2:3], 40
; GCN-NEXT: v_cvt_f32_i32_e32 v0, s2
; GCN-NEXT: s_mov_b32 s3, 0x41c00000
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_ashr_i32 s0, s2, 30
; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v0
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_or_b32 s6, s4, 1
; GCN-NEXT: v_mul_f32_e32 v1, s7, v1
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_or_b32 s2, s0, 1
; GCN-NEXT: v_mul_f32_e32 v1, s3, v1
; GCN-NEXT: v_trunc_f32_e32 v1, v1
; GCN-NEXT: v_mad_f32 v2, -v1, v0, s7
; GCN-NEXT: v_mad_f32 v2, -v1, v0, s3
; GCN-NEXT: v_cvt_i32_f32_e32 v1, v1
; GCN-NEXT: v_cmp_ge_f32_e64 s[4:5], |v2|, |v0|
; GCN-NEXT: s_cmp_lg_u32 s4, 0
; GCN-NEXT: s_cselect_b32 s4, s6, 0
; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v1
; GCN-NEXT: v_cmp_ge_f32_e64 s[0:1], |v2|, |v0|
; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_cselect_b32 s0, s2, 0
; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v1
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24
; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_sdiv24_k_num_i64:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
; GCN-IR-NEXT: s_mov_b32 s2, -1
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_ashr_i64 s[6:7], s[6:7], 40
; GCN-IR-NEXT: v_cvt_f32_i32_e32 v0, s6
; GCN-IR-NEXT: s_mov_b32 s7, 0x41c00000
; GCN-IR-NEXT: s_mov_b32 s0, s4
; GCN-IR-NEXT: s_ashr_i32 s4, s6, 30
; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[2:3], 40
; GCN-IR-NEXT: v_cvt_f32_i32_e32 v0, s2
; GCN-IR-NEXT: s_mov_b32 s3, 0x41c00000
; GCN-IR-NEXT: s_mov_b32 s4, s0
; GCN-IR-NEXT: s_ashr_i32 s0, s2, 30
; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v1, v0
; GCN-IR-NEXT: s_mov_b32 s1, s5
; GCN-IR-NEXT: s_or_b32 s6, s4, 1
; GCN-IR-NEXT: v_mul_f32_e32 v1, s7, v1
; GCN-IR-NEXT: s_mov_b32 s5, s1
; GCN-IR-NEXT: s_or_b32 s2, s0, 1
; GCN-IR-NEXT: v_mul_f32_e32 v1, s3, v1
; GCN-IR-NEXT: v_trunc_f32_e32 v1, v1
; GCN-IR-NEXT: v_mad_f32 v2, -v1, v0, s7
; GCN-IR-NEXT: v_mad_f32 v2, -v1, v0, s3
; GCN-IR-NEXT: v_cvt_i32_f32_e32 v1, v1
; GCN-IR-NEXT: v_cmp_ge_f32_e64 s[4:5], |v2|, |v0|
; GCN-IR-NEXT: s_cmp_lg_u32 s4, 0
; GCN-IR-NEXT: s_cselect_b32 s4, s6, 0
; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s4, v1
; GCN-IR-NEXT: v_cmp_ge_f32_e64 s[0:1], |v2|, |v0|
; GCN-IR-NEXT: s_cmp_lg_u32 s0, 0
; GCN-IR-NEXT: s_cselect_b32 s0, s2, 0
; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s0, v1
; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24
; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-IR-NEXT: s_endpgm
%x.shr = ashr i64 %x, 40
%result = sdiv i64 24, %x.shr
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll
@@ -73,7 +73,7 @@ entry:
; GCN-LABEL: {{^}}mul_v2i16:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v[[DST_MUL:[0-9]+]], v[[DST1]], v[[DST0]]
; NOSDWA: v_mul_u32_u24_e32 v[[DST_MUL:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa
327 changes: 165 additions & 162 deletions llvm/test/CodeGen/AMDGPU/select.f16.ll

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -54,8 +54,8 @@ define amdgpu_kernel void @lshr_i64_32(i64 addrspace(1)* %out, i64 addrspace(1)*
; after 64-bit shift is split.

; GCN-LABEL: {{^}}lshr_and_i64_35:
; GCN: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_load_dword v[[LO:[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN-DAG: buffer_load_dword v[[LO:[0-9]+]]
; GCN: v_bfe_u32 v[[BFE:[0-9]+]], v[[LO]], 8, 23
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
define amdgpu_kernel void @lshr_and_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
56 changes: 28 additions & 28 deletions llvm/test/CodeGen/AMDGPU/shl.ll
@@ -13,14 +13,14 @@ define amdgpu_kernel void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> add
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_mov_b32 s10, s2
; GCN-NEXT: s_mov_b32 s11, s3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s8, s6
; GCN-NEXT: s_mov_b32 s9, s7
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s2
; GCN-NEXT: s_mov_b32 s7, s3
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshl_b32_e32 v1, v1, v3
; GCN-NEXT: v_lshl_b32_e32 v0, v0, v2
@@ -59,15 +59,15 @@ define amdgpu_kernel void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> add
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_mov_b32 s10, s2
; GCN-NEXT: s_mov_b32 s11, s3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s8, s6
; GCN-NEXT: s_mov_b32 s9, s7
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s2
; GCN-NEXT: s_mov_b32 s7, s3
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 offset:16
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshl_b32_e32 v3, v3, v7
; GCN-NEXT: v_lshl_b32_e32 v2, v2, v6
@@ -411,23 +411,23 @@ define amdgpu_kernel void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> add
; GCN-NEXT: s_mov_b32 s8, s6
; GCN-NEXT: s_mov_b32 s9, s7
; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GCN-NEXT: s_mov_b64 s[12:13], s[6:7]
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: s_mov_b32 s14, 0
; GCN-NEXT: s_mov_b32 s15, s3
; GCN-NEXT: s_mov_b64 s[12:13], s[6:7]
; GCN-NEXT: buffer_load_dword v2, off, s[8:11], 0
; GCN-NEXT: buffer_load_dword v0, v[0:1], s[12:15], 0 addr64 offset:4
; GCN-NEXT: s_mov_b32 s6, 0xffff
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_mov_b32 s4, 0xffff
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; GCN-NEXT: v_and_b32_e32 v0, s4, v0
; GCN-NEXT: v_and_b32_e32 v0, s6, v0
; GCN-NEXT: v_lshl_b32_e32 v0, v2, v0
; GCN-NEXT: v_lshl_b32_e32 v1, v1, v3
; GCN-NEXT: v_and_b32_e32 v0, s4, v0
; GCN-NEXT: v_and_b32_e32 v0, s6, v0
; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GCN-NEXT: v_or_b32_e32 v0, v0, v1
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
@@ -490,24 +490,24 @@ define amdgpu_kernel void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> add
; GCN-NEXT: s_mov_b64 s[0:1], s[6:7]
; GCN-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
; GCN-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8
; GCN-NEXT: s_mov_b32 s8, 0xffff
; GCN-NEXT: s_mov_b32 s0, 0xffff
; GCN-NEXT: s_mov_b64 s[6:7], s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_and_b32_e32 v8, s8, v4
; GCN-NEXT: v_and_b32_e32 v8, s0, v4
; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GCN-NEXT: v_and_b32_e32 v9, s8, v5
; GCN-NEXT: v_and_b32_e32 v9, s0, v5
; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; GCN-NEXT: v_lshl_b32_e32 v5, v7, v5
; GCN-NEXT: v_lshl_b32_e32 v3, v3, v9
; GCN-NEXT: v_lshl_b32_e32 v4, v6, v4
; GCN-NEXT: v_lshl_b32_e32 v2, v2, v8
; GCN-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GCN-NEXT: v_and_b32_e32 v3, s8, v3
; GCN-NEXT: v_and_b32_e32 v3, s0, v3
; GCN-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GCN-NEXT: v_and_b32_e32 v2, s8, v2
; GCN-NEXT: v_and_b32_e32 v2, s0, v2
; GCN-NEXT: v_or_b32_e32 v3, v3, v5
; GCN-NEXT: v_or_b32_e32 v2, v2, v4
; GCN-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
@@ -732,17 +732,17 @@ define amdgpu_kernel void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> add
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_mov_b32 s10, s2
; GCN-NEXT: s_mov_b32 s11, s3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s8, s6
; GCN-NEXT: s_mov_b32 s9, s7
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
; GCN-NEXT: buffer_load_dwordx4 v[8:11], off, s[8:11], 0 offset:32
; GCN-NEXT: buffer_load_dwordx4 v[11:14], off, s[8:11], 0 offset:48
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s7
; GCN-NEXT: s_mov_b32 s6, s2
; GCN-NEXT: s_mov_b32 s7, s3
; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: buffer_load_dwordx4 v[4:7], off, s[4:7], 0 offset:16
; GCN-NEXT: buffer_load_dwordx4 v[8:11], off, s[4:7], 0 offset:32
; GCN-NEXT: buffer_load_dwordx4 v[11:14], off, s[4:7], 0 offset:48
; GCN-NEXT: s_waitcnt vmcnt(1)
; GCN-NEXT: v_lshl_b64 v[2:3], v[2:3], v10
; GCN-NEXT: s_waitcnt vmcnt(0)
192 changes: 96 additions & 96 deletions llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
@@ -86,23 +86,23 @@ define amdgpu_kernel void @v_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> a
; VI-LABEL: v_shl_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v5, v[0:1]
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, 4, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v1, v[4:5]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b16_e32 v4, v1, v0
; VI-NEXT: v_lshlrev_b16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v0, v4, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshlrev_b16_e32 v3, v2, v5
; VI-NEXT: v_lshlrev_b16_sdwa v2, v2, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v2, v3, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: v_shl_v2i16:
@@ -116,17 +116,17 @@ define amdgpu_kernel void @v_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> a
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v5, s8, v3
; CI-NEXT: v_and_b32_e32 v5, s0, v3
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; CI-NEXT: v_lshl_b32_e32 v3, v4, v3
; CI-NEXT: v_lshl_b32_e32 v2, v2, v5
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; CI-NEXT: s_endpgm
@@ -170,39 +170,39 @@ define amdgpu_kernel void @shl_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b16_e32 v1, s0, v0
; VI-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshlrev_b16_e32 v4, s0, v3
; VI-NEXT: v_lshlrev_b16_sdwa v2, v2, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v2, v4, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: shl_v_s_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dword s0, s[0:1], 0xd
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_load_dword s8, s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, 0
; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshr_b32 s9, s0, 16
; CI-NEXT: s_and_b32 s10, s0, s8
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_and_b32 s8, s8, s0
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_lshlrev_b32_e32 v2, s10, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, s9, v3
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, s1, v3
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@@ -245,17 +245,17 @@ define amdgpu_kernel void @shl_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b16_e64 v1, v0, s0
; VI-NEXT: v_lshlrev_b16_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshlrev_b16_e64 v4, v3, s0
; VI-NEXT: v_lshlrev_b16_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v2, v4, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: shl_s_v_v2i16:
@@ -270,12 +270,12 @@ define amdgpu_kernel void @shl_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s9, s8, 16
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v3, s0, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: v_lshl_b32_e32 v2, s9, v2
; CI-NEXT: v_lshl_b32_e32 v2, s1, v2
; CI-NEXT: v_lshl_b32_e32 v3, s8, v3
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_and_b32_e32 v3, s0, v3
@@ -319,15 +319,15 @@ define amdgpu_kernel void @shl_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i1
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b16_e64 v1, v0, 8
; VI-NEXT: v_lshlrev_b16_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshlrev_b16_e64 v2, v3, 8
; VI-NEXT: v_lshlrev_b16_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v2, v2, v3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: shl_imm_v_v2i16:
@@ -387,16 +387,16 @@ define amdgpu_kernel void @shl_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i1
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v0
; VI-NEXT: v_and_b32_e32 v1, 0xff000000, v1
; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: flat_store_dword v[2:3], v0
; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v3
; VI-NEXT: v_and_b32_e32 v2, 0xff000000, v2
; VI-NEXT: v_lshlrev_b16_e32 v3, 8, v3
; VI-NEXT: v_or_b32_e32 v2, v3, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; CI-LABEL: shl_v_imm_v2i16:
@@ -429,45 +429,45 @@ define amdgpu_kernel void @v_shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> a
; GFX9-LABEL: v_shl_v4i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s2, v2
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s2, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:8
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_lshlrev_b16 v1, v1, v5
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v0, v4
; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
; GFX9-NEXT: v_pk_lshlrev_b16 v1, v1, v3
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v0, v2
; GFX9-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: s_endpgm
;
; VI-LABEL: v_shl_v4i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; VI-NEXT: v_lshlrev_b32_e32 v4, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v4, vcc, 8, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 8, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_add_u32_e32 v4, vcc, s0, v4
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b16_e32 v6, v5, v1
; VI-NEXT: v_lshlrev_b16_sdwa v1, v5, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b16_e32 v5, v4, v0
; VI-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b16_e32 v6, v3, v1
; VI-NEXT: v_lshlrev_b16_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b16_e32 v3, v2, v0
; VI-NEXT: v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_or_b32_e32 v1, v6, v1
; VI-NEXT: v_or_b32_e32 v0, v5, v0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: v_or_b32_e32 v0, v3, v0
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
;
; CI-LABEL: v_shl_v4i16:
@@ -481,24 +481,24 @@ define amdgpu_kernel void @v_shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> a
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
; CI-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8
; CI-NEXT: s_mov_b32 s8, 0xffff
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v8, s8, v4
; CI-NEXT: v_and_b32_e32 v8, s0, v4
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; CI-NEXT: v_and_b32_e32 v9, s8, v5
; CI-NEXT: v_and_b32_e32 v9, s0, v5
; CI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; CI-NEXT: v_lshl_b32_e32 v5, v7, v5
; CI-NEXT: v_lshl_b32_e32 v3, v3, v9
; CI-NEXT: v_lshl_b32_e32 v4, v6, v4
; CI-NEXT: v_lshl_b32_e32 v2, v2, v8
; CI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; CI-NEXT: v_and_b32_e32 v3, s8, v3
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; CI-NEXT: v_and_b32_e32 v2, s8, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_or_b32_e32 v3, v3, v5
; CI-NEXT: v_or_b32_e32 v2, v2, v4
; CI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
@@ -539,21 +539,21 @@ define amdgpu_kernel void @shl_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i1
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: s_mov_b32 s0, 0xff000000
; VI-NEXT: s_mov_b32 s2, 0xff000000
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v1
; VI-NEXT: v_lshlrev_b16_e32 v5, 8, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; VI-NEXT: v_and_b32_e32 v0, s0, v0
; VI-NEXT: v_and_b32_e32 v0, s2, v0
; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1
; VI-NEXT: v_and_b32_e32 v4, s0, v4
; VI-NEXT: v_and_b32_e32 v4, s2, v4
; VI-NEXT: v_or_b32_e32 v1, v1, v4
; VI-NEXT: v_or_b32_e32 v0, v5, v0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -569,14 +569,14 @@ define amdgpu_kernel void @shl_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s8, 0xff00
; CI-NEXT: s_mov_b32 s0, 0xff00
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v4, 8, v3
; CI-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; CI-NEXT: v_and_b32_e32 v4, s8, v4
; CI-NEXT: v_and_b32_e32 v4, s0, v4
; CI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; CI-NEXT: v_and_b32_e32 v3, s8, v3
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; CI-NEXT: v_or_b32_e32 v3, v3, v4
; CI-NEXT: v_and_b32_e32 v2, 0xff00ff00, v2
286 changes: 143 additions & 143 deletions llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll

Large diffs are not rendered by default.

40 changes: 20 additions & 20 deletions llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -399,14 +399,14 @@ define amdgpu_kernel void @v_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 addr
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s10, s2
; SI-NEXT: s_mov_b32 s11, s3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s6
; SI-NEXT: s_mov_b32 s9, s7
; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ashrrev_i32_e32 v1, 24, v0
; SI-NEXT: v_bfe_i32 v2, v0, 16, 8
@@ -423,14 +423,14 @@ define amdgpu_kernel void @v_sext_v4i8_to_v4i32(i32 addrspace(1)* %out, i32 addr
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s10, s2
; VI-NEXT: s_mov_b32 s11, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: s_mov_b32 s4, s6
; VI-NEXT: s_mov_b32 s5, s7
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b16_e32 v1, 8, v0
; VI-NEXT: v_ashrrev_i32_e32 v2, 24, v0
@@ -523,14 +523,14 @@ define amdgpu_kernel void @v_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 add
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s10, s2
; SI-NEXT: s_mov_b32 s11, s3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s6
; SI-NEXT: s_mov_b32 s9, s7
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ashr_i64 v[2:3], v[0:1], 48
; SI-NEXT: v_ashrrev_i32_e32 v3, 16, v0
@@ -547,14 +547,14 @@ define amdgpu_kernel void @v_sext_v4i16_to_v4i32(i32 addrspace(1)* %out, i64 add
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s10, s2
; VI-NEXT: s_mov_b32 s11, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: s_mov_b32 s4, s6
; VI-NEXT: s_mov_b32 s5, s7
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_ashrrev_i32_e32 v3, 16, v0
; VI-NEXT: v_bfe_i32 v0, v0, 0, 16
214 changes: 94 additions & 120 deletions llvm/test/CodeGen/AMDGPU/sub.v2i16.ll

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AMDGPU/trunc-combine.ll
@@ -106,13 +106,13 @@ define amdgpu_kernel void @truncate_high_elt_extract_vector(<2 x i16> addrspace(
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s2, s[4:5], 0x0
; VI-NEXT: s_load_dword s3, s[6:7], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_load_dword s0, s[4:5], 0x0
; VI-NEXT: s_load_dword s1, s[6:7], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_sext_i32_i16 s0, s0
; VI-NEXT: s_sext_i32_i16 s1, s1
; VI-NEXT: s_sext_i32_i16 s0, s2
; VI-NEXT: s_sext_i32_i16 s1, s3
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_mul_i32_i24_e32 v2, s1, v2
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
Expand Down
44 changes: 22 additions & 22 deletions llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -1824,46 +1824,46 @@ define amdgpu_kernel void @s_test_udiv24_k_num_i64(i64 addrspace(1)* %out, i64 %
define amdgpu_kernel void @s_test_udiv24_k_den_i64(i64 addrspace(1)* %out, i64 %x) {
; GCN-LABEL: s_test_udiv24_k_den_i64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b32 s6, 0x46b6fe00
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_lshr_b32 s0, s7, 8
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s0
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: s_lshr_b32 s2, s3, 8
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2
; GCN-NEXT: s_mov_b32 s2, 0x46b6fe00
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: v_mul_f32_e32 v1, 0x38331158, v0
; GCN-NEXT: v_trunc_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v2, v1
; GCN-NEXT: v_mad_f32 v0, -v1, s6, v0
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s6
; GCN-NEXT: v_mad_f32 v0, -v1, s2, v0
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s2
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_udiv24_k_den_i64:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_mov_b32 s6, 0x46b6fe00
; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
; GCN-IR-NEXT: s_mov_b32 s2, -1
; GCN-IR-NEXT: s_lshr_b32 s0, s7, 8
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s0
; GCN-IR-NEXT: s_mov_b32 s0, s4
; GCN-IR-NEXT: s_mov_b32 s1, s5
; GCN-IR-NEXT: s_lshr_b32 s2, s3, 8
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s2
; GCN-IR-NEXT: s_mov_b32 s2, 0x46b6fe00
; GCN-IR-NEXT: s_mov_b32 s4, s0
; GCN-IR-NEXT: s_mov_b32 s5, s1
; GCN-IR-NEXT: v_mul_f32_e32 v1, 0x38331158, v0
; GCN-IR-NEXT: v_trunc_f32_e32 v1, v1
; GCN-IR-NEXT: v_cvt_u32_f32_e32 v2, v1
; GCN-IR-NEXT: v_mad_f32 v0, -v1, s6, v0
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s6
; GCN-IR-NEXT: v_mad_f32 v0, -v1, s2, v0
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s2
; GCN-IR-NEXT: v_mov_b32_e32 v1, 0
; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GCN-IR-NEXT: v_and_b32_e32 v0, 0xffffff, v0
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-IR-NEXT: s_endpgm
%x.shr = lshr i64 %x, 40
%result = udiv i64 %x.shr, 23423
56 changes: 28 additions & 28 deletions llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -1479,52 +1479,52 @@ define amdgpu_kernel void @s_test_urem24_k_num_i64(i64 addrspace(1)* %out, i64 %
define amdgpu_kernel void @s_test_urem24_k_den_i64(i64 addrspace(1)* %out, i64 %x) {
; GCN-LABEL: s_test_urem24_k_den_i64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s1, 0x46b6fe00
; GCN-NEXT: s_movk_i32 s0, 0x5b7f
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s4, 0x46b6fe00
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshr_b32 s6, s7, 8
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6
; GCN-NEXT: s_lshr_b32 s2, s3, 8
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2
; GCN-NEXT: s_movk_i32 s3, 0x5b7f
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: v_mul_f32_e32 v1, 0x38331158, v0
; GCN-NEXT: v_trunc_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v2, v1
; GCN-NEXT: v_mad_f32 v0, -v1, s1, v0
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s1
; GCN-NEXT: s_mov_b32 s1, s5
; GCN-NEXT: v_mad_f32 v0, -v1, s4, v0
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s4
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GCN-NEXT: v_mul_lo_u32 v0, v0, s0
; GCN-NEXT: s_mov_b32 s0, s4
; GCN-NEXT: v_mul_lo_u32 v0, v0, s3
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_urem24_k_den_i64:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-IR-NEXT: s_mov_b32 s1, 0x46b6fe00
; GCN-IR-NEXT: s_movk_i32 s0, 0x5b7f
; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
; GCN-IR-NEXT: s_mov_b32 s2, -1
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-IR-NEXT: s_mov_b32 s4, 0x46b6fe00
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_lshr_b32 s6, s7, 8
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s6
; GCN-IR-NEXT: s_lshr_b32 s2, s3, 8
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s2
; GCN-IR-NEXT: s_movk_i32 s3, 0x5b7f
; GCN-IR-NEXT: s_mov_b32 s5, s1
; GCN-IR-NEXT: v_mul_f32_e32 v1, 0x38331158, v0
; GCN-IR-NEXT: v_trunc_f32_e32 v1, v1
; GCN-IR-NEXT: v_cvt_u32_f32_e32 v2, v1
; GCN-IR-NEXT: v_mad_f32 v0, -v1, s1, v0
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s1
; GCN-IR-NEXT: s_mov_b32 s1, s5
; GCN-IR-NEXT: v_mad_f32 v0, -v1, s4, v0
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s4
; GCN-IR-NEXT: s_mov_b32 s4, s0
; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, s0
; GCN-IR-NEXT: s_mov_b32 s0, s4
; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, s3
; GCN-IR-NEXT: v_mov_b32_e32 v1, 0
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s2, v0
; GCN-IR-NEXT: v_and_b32_e32 v0, 0xffffff, v0
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-IR-NEXT: s_endpgm
%x.shr = lshr i64 %x, 40
%result = urem i64 %x.shr, 23423
32 changes: 15 additions & 17 deletions llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
@@ -80,12 +80,12 @@ define amdgpu_kernel void @madak_f16_use_2(
; SI-NEXT: s_mov_b32 s9, s11
; SI-NEXT: s_mov_b32 s10, s2
; SI-NEXT: s_mov_b32 s11, s3
; SI-NEXT: buffer_load_ushort v0, off, s[16:19], 0
; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s14, s2
; SI-NEXT: s_mov_b32 s15, s3
; SI-NEXT: buffer_load_ushort v3, off, s[12:15], 0
; SI-NEXT: v_mov_b32_e32 v2, 0x41200000
; SI-NEXT: buffer_load_ushort v0, off, s[16:19], 0
; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; SI-NEXT: buffer_load_ushort v2, off, s[12:15], 0
; SI-NEXT: v_mov_b32_e32 v3, 0x41200000
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s8, s6
@@ -95,11 +95,11 @@ define amdgpu_kernel void @madak_f16_use_2(
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_madak_f32 v1, v0, v1, 0x41200000
; SI-NEXT: v_mac_f32_e32 v2, v0, v3
; SI-NEXT: v_mac_f32_e32 v3, v0, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v3
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: buffer_store_short v1, off, s[8:11], 0
; SI-NEXT: s_endpgm
@@ -119,24 +119,22 @@ define amdgpu_kernel void @madak_f16_use_2(
; VI-NEXT: s_mov_b32 s9, s11
; VI-NEXT: s_mov_b32 s10, s2
; VI-NEXT: s_mov_b32 s11, s3
; VI-NEXT: buffer_load_ushort v0, off, s[16:19], 0
; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s14, s2
; VI-NEXT: s_mov_b32 s15, s3
; VI-NEXT: buffer_load_ushort v3, off, s[12:15], 0
; VI-NEXT: v_mov_b32_e32 v2, 0x4900
; VI-NEXT: buffer_load_ushort v0, off, s[16:19], 0
; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; VI-NEXT: buffer_load_ushort v2, off, s[12:15], 0
; VI-NEXT: v_mov_b32_e32 v3, 0x4900
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: s_mov_b32 s4, s6
; VI-NEXT: s_mov_b32 s5, s7
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_madak_f16 v1, v0, v1, 0x4900
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mac_f16_e32 v2, v0, v3
; VI-NEXT: v_mac_f16_e32 v3, v0, v2
; VI-NEXT: buffer_store_short v1, off, s[0:3], 0
; VI-NEXT: buffer_store_short v2, off, s[4:7], 0
; VI-NEXT: buffer_store_short v3, off, s[8:11], 0
; VI-NEXT: s_endpgm
half addrspace(1)* %r0,
half addrspace(1)* %r1,
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
@@ -36,33 +36,33 @@ define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(i32 addrspace(1
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0xd
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: v_mov_b32_e32 v5, 0
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b64 s[0:1], s[6:7]
; GCN-NEXT: v_lshlrev_b32_e32 v1, 4, v0
; GCN-NEXT: v_lshlrev_b32_e32 v4, 2, v0
; GCN-NEXT: v_mov_b32_e32 v5, v2
; GCN-NEXT: buffer_load_dwordx4 v[0:3], v[1:2], s[0:3], 0 addr64
; GCN-NEXT: v_mov_b32_e32 v6, s8
; GCN-NEXT: v_lshlrev_b32_e32 v4, 4, v0
; GCN-NEXT: buffer_load_dwordx4 v[1:4], v[4:5], s[0:3], 0 addr64
; GCN-NEXT: v_lshlrev_b32_e32 v6, 2, v0
; GCN-NEXT: v_mov_b32_e32 v0, s8
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 3
; GCN-NEXT: v_mov_b32_e32 v7, v5
; GCN-NEXT: s_mov_b64 s[6:7], s[2:3]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc
; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 2
; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc
; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 1
; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 1
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 2
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 3
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 2
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GCN-NEXT: buffer_store_dword v0, v[4:5], s[4:7], 0 addr64
; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 3
; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GCN-NEXT: buffer_store_dword v0, v[6:7], s[4:7], 0 addr64
; GCN-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
@@ -108,12 +108,12 @@ define <4 x half> @shuffle_v4f16_35u5(<4 x half> addrspace(1)* %arg0, <4 x half>
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v2, v[2:3], off
; GFX9-NEXT: global_load_dword v0, v[0:1], off offset:4
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: v_and_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%val0 = load <4 x half>, <4 x half> addrspace(1)* %arg0
@@ -128,11 +128,11 @@ define <4 x half> @shuffle_v4f16_357u(<4 x half> addrspace(1)* %arg0, <4 x half>
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[2:3], v[2:3], off
; GFX9-NEXT: global_load_dword v0, v[0:1], off offset:4
; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_and_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v3
; GFX9-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]