AMDGPU: Don't report 2-byte alignment as fast
This is apparently worse than 1-byte alignment. This does not attempt
to decompose 2-byte aligned wide stores, but will stop trying to
produce them.

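For illustration only (not part of the commit message), a minimal LLVM IR sketch of the pattern in question; the function name is hypothetical, and the added test @global_store_2xi16_align2 below exercises the same shape. With this change, the two naturally aligned i16 stores stay as two short stores instead of being merged into a single 2-byte-aligned dword store:

; Sketch: two adjacent i16 stores, each only 2-byte aligned. Reporting
; align 2 as "fast" previously allowed merging these into one dword
; store with align 2; they should now remain two short stores.
define void @store_2xi16_align2_sketch(i16 addrspace(1)* %r) {
  %gep = getelementptr i16, i16 addrspace(1)* %r, i64 1
  store i16 1, i16 addrspace(1)* %r, align 2
  store i16 2, i16 addrspace(1)* %gep, align 2
  ret void
}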

Also fix a bug in the LoadStoreVectorizer that was decreasing the
alignment when vectorizing stack accesses. It was assuming a stack
object was an alloca whose base alignment could be raised, which is
not true if the pointer is derived from a function argument.
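As a hedged sketch of that vectorizer problem (function name hypothetical, not from the commit): when a private-memory pointer arrives as a function argument rather than coming from a local alloca, getOrEnforceKnownAlignment cannot raise the base object's alignment, so the vectorizer has to bail out instead of emitting a wide access at a lower alignment than it assumed. The updated @vload2_private checks in chain-hi-to-lo.ll below appear to come from this fix.

; Sketch: %p points into private (scratch) memory but arrives as an
; argument, so it is not an alloca whose base alignment the pass can
; raise. The two align-2 loads must not be combined into a wider
; access on the assumption of stack-adjusted alignment.
define i32 @load_2xi16_private_arg(i16 addrspace(5)* %p) {
  %gep = getelementptr i16, i16 addrspace(5)* %p, i64 1
  %lo = load i16, i16 addrspace(5)* %p, align 2
  %hi = load i16, i16 addrspace(5)* %gep, align 2
  %lo.ext = zext i16 %lo to i32
  %hi.ext = zext i16 %hi to i32
  %hi.shl = shl i32 %hi.ext, 16
  %or = or i32 %lo.ext, %hi.shl
  ret i32 %or
}
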
arsenm committed Feb 11, 2020
1 parent b2c44de commit 86f9117
Showing 9 changed files with 695 additions and 29 deletions.
4 changes: 3 additions & 1 deletion llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1251,9 +1251,11 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
   // If we have an uniform constant load, it still requires using a slow
   // buffer instruction if unaligned.
   if (IsFast) {
+    // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
+    // 2-byte alignment is worse than 1 unless doing a 2-byte accesss.
     *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
-      (Align % 4 == 0) : true;
+      Align >= 4 : Align != 2;
   }
 
   return true;
12 changes: 9 additions & 3 deletions llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1028,8 +1028,10 @@ bool Vectorizer::vectorizeStoreChain(
     unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                    StackAdjustedAlignment,
                                                    DL, S0, nullptr, &DT);
-    if (NewAlign != 0)
+    if (NewAlign >= Alignment.value())
       Alignment = Align(NewAlign);
+    else
+      return false;
   }
 
   if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) {
@@ -1168,8 +1170,12 @@ bool Vectorizer::vectorizeLoadChain(
            vectorizeLoadChain(Chains.second, InstructionsProcessed);
     }
 
-    Alignment = getOrEnforceKnownAlignment(
-        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    unsigned NewAlign = getOrEnforceKnownAlignment(
+        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    if (NewAlign >= Alignment)
+      Alignment = NewAlign;
+    else
+      return false;
   }
 
   if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
13 changes: 8 additions & 5 deletions llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
@@ -199,14 +199,17 @@ define amdgpu_kernel void @vload2_private(i16 addrspace(1)* nocapture readonly %
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: v_mov_b32_e32 v2, s4
 ; GCN-NEXT: v_mov_b32_e32 v3, s5
-; GCN-NEXT: global_load_ushort v4, v[2:3], off offset:4
-; GCN-NEXT: global_load_dword v2, v[2:3], off
+; GCN-NEXT: global_load_ushort v4, v[2:3], off
 ; GCN-NEXT: v_mov_b32_e32 v0, s6
 ; GCN-NEXT: v_mov_b32_e32 v1, s7
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: buffer_store_short v2, off, s[0:3], s9 offset:4
-; GCN-NEXT: buffer_store_short_d16_hi v2, off, s[0:3], s9 offset:6
-; GCN-NEXT: buffer_store_short v4, off, s[0:3], s9 offset:8
+; GCN-NEXT: buffer_store_short v4, off, s[0:3], s9 offset:4
+; GCN-NEXT: global_load_ushort v4, v[2:3], off offset:2
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_short v4, off, s[0:3], s9 offset:6
+; GCN-NEXT: global_load_ushort v2, v[2:3], off offset:4
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_store_short v2, off, s[0:3], s9 offset:8
 ; GCN-NEXT: buffer_load_ushort v2, off, s[0:3], s9 offset:4
 ; GCN-NEXT: buffer_load_ushort v4, off, s[0:3], s9 offset:6
 ; GCN-NEXT: s_waitcnt vmcnt(1)
328 changes: 328 additions & 0 deletions llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
@@ -0,0 +1,328 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7-ALIGNED %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7-UNALIGNED %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9 %s

; Should not merge this to a dword load
define i32 @global_load_2xi16_align2(i16 addrspace(1)* %p) #0 {
; GFX7-ALIGNED-LABEL: global_load_2xi16_align2:
; GFX7-ALIGNED: ; %bb.0:
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX7-ALIGNED-NEXT: flat_load_ushort v0, v[0:1]
; GFX7-ALIGNED-NEXT: flat_load_ushort v1, v[2:3]
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-UNALIGNED-LABEL: global_load_2xi16_align2:
; GFX7-UNALIGNED: ; %bb.0:
; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
; GFX7-UNALIGNED-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX7-UNALIGNED-NEXT: flat_load_ushort v0, v[0:1]
; GFX7-UNALIGNED-NEXT: flat_load_ushort v1, v[2:3]
; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX7-UNALIGNED-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_load_2xi16_align2:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v2, v[0:1], off
; GFX9-NEXT: global_load_ushort v0, v[0:1], off offset:2
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
%p.0 = load i16, i16 addrspace(1)* %p, align 2
%p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
%zext.0 = zext i16 %p.0 to i32
%zext.1 = zext i16 %p.1 to i32
%shl.1 = shl i32 %zext.1, 16
%or = or i32 %zext.0, %shl.1
ret i32 %or
}

; Should not merge this to a dword store
define amdgpu_kernel void @global_store_2xi16_align2(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
; GFX7-ALIGNED-LABEL: global_store_2xi16_align2:
; GFX7-ALIGNED: ; %bb.0:
; GFX7-ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, 1
; GFX7-ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-ALIGNED-NEXT: s_add_u32 s2, s0, 2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-ALIGNED-NEXT: flat_store_short v[0:1], v2
; GFX7-ALIGNED-NEXT: s_addc_u32 s3, s1, 0
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, 2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s3
; GFX7-ALIGNED-NEXT: flat_store_short v[0:1], v2
; GFX7-ALIGNED-NEXT: s_endpgm
;
; GFX7-UNALIGNED-LABEL: global_store_2xi16_align2:
; GFX7-UNALIGNED: ; %bb.0:
; GFX7-UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 1
; GFX7-UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-UNALIGNED-NEXT: s_add_u32 s2, s0, 2
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-UNALIGNED-NEXT: flat_store_short v[0:1], v2
; GFX7-UNALIGNED-NEXT: s_addc_u32 s3, s1, 0
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s2
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 2
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s3
; GFX7-UNALIGNED-NEXT: flat_store_short v[0:1], v2
; GFX7-UNALIGNED-NEXT: s_endpgm
;
; GFX9-LABEL: global_store_2xi16_align2:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX9-NEXT: v_mov_b32_e32 v2, 1
; GFX9-NEXT: v_mov_b32_e32 v3, 2
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_short v[0:1], v2, off
; GFX9-NEXT: global_store_short v[0:1], v3, off offset:2
; GFX9-NEXT: s_endpgm
%gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
store i16 1, i16 addrspace(1)* %r, align 2
store i16 2, i16 addrspace(1)* %gep.r, align 2
ret void
}

; Should produce align 1 dword when legal
define i32 @global_load_2xi16_align1(i16 addrspace(1)* %p) #0 {
; GFX7-ALIGNED-LABEL: global_load_2xi16_align1:
; GFX7-ALIGNED: ; %bb.0:
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-ALIGNED-NEXT: v_add_i32_e32 v2, vcc, 2, v0
; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX7-ALIGNED-NEXT: v_add_i32_e32 v4, vcc, 1, v0
; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; GFX7-ALIGNED-NEXT: flat_load_ubyte v6, v[0:1]
; GFX7-ALIGNED-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; GFX7-ALIGNED-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-ALIGNED-NEXT: flat_load_ubyte v2, v[2:3]
; GFX7-ALIGNED-NEXT: flat_load_ubyte v3, v[4:5]
; GFX7-ALIGNED-NEXT: flat_load_ubyte v0, v[0:1]
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v0, v2
; GFX7-ALIGNED-NEXT: v_or_b32_e32 v1, v1, v6
; GFX7-ALIGNED-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-ALIGNED-NEXT: v_or_b32_e32 v0, v1, v0
; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-UNALIGNED-LABEL: global_load_2xi16_align1:
; GFX7-UNALIGNED: ; %bb.0:
; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: flat_load_dword v0, v[0:1]
; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_load_2xi16_align1:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v0, v[0:1], off
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: s_mov_b32 s4, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_bfi_b32 v1, v1, 0, v0
; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
%p.0 = load i16, i16 addrspace(1)* %p, align 1
%p.1 = load i16, i16 addrspace(1)* %gep.p, align 1
%zext.0 = zext i16 %p.0 to i32
%zext.1 = zext i16 %p.1 to i32
%shl.1 = shl i32 %zext.1, 16
%or = or i32 %zext.0, %shl.1
ret i32 %or
}

; Should produce align 1 dword when legal
define amdgpu_kernel void @global_store_2xi16_align1(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
; GFX7-ALIGNED-LABEL: global_store_2xi16_align1:
; GFX7-ALIGNED: ; %bb.0:
; GFX7-ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v4, 1
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v5, 0
; GFX7-ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-ALIGNED-NEXT: s_add_u32 s2, s0, 2
; GFX7-ALIGNED-NEXT: s_addc_u32 s3, s1, 0
; GFX7-ALIGNED-NEXT: s_add_u32 s4, s0, 1
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-ALIGNED-NEXT: s_addc_u32 s5, s1, 0
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-ALIGNED-NEXT: s_add_u32 s0, s0, 3
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, s4
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v3, s5
; GFX7-ALIGNED-NEXT: flat_store_byte v[0:1], v4
; GFX7-ALIGNED-NEXT: flat_store_byte v[2:3], v5
; GFX7-ALIGNED-NEXT: s_addc_u32 s1, s1, 0
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, s2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v4, 2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v3, s3
; GFX7-ALIGNED-NEXT: flat_store_byte v[0:1], v5
; GFX7-ALIGNED-NEXT: flat_store_byte v[2:3], v4
; GFX7-ALIGNED-NEXT: s_endpgm
;
; GFX7-UNALIGNED-LABEL: global_store_2xi16_align1:
; GFX7-UNALIGNED: ; %bb.0:
; GFX7-UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 0x20001
; GFX7-UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-UNALIGNED-NEXT: flat_store_dword v[0:1], v2
; GFX7-UNALIGNED-NEXT: s_endpgm
;
; GFX9-LABEL: global_store_2xi16_align1:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX9-NEXT: v_mov_b32_e32 v2, 0x20001
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
%gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
store i16 1, i16 addrspace(1)* %r, align 1
store i16 2, i16 addrspace(1)* %gep.r, align 1
ret void
}

; Should merge this to a dword load
define i32 @global_load_2xi16_align4(i16 addrspace(1)* %p) #0 {
; GFX7-LABEL: load_2xi16_align4:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: flat_load_dword v0, v[0:1]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-ALIGNED-LABEL: global_load_2xi16_align4:
; GFX7-ALIGNED: ; %bb.0:
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-ALIGNED-NEXT: flat_load_dword v0, v[0:1]
; GFX7-ALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-ALIGNED-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-UNALIGNED-LABEL: global_load_2xi16_align4:
; GFX7-UNALIGNED: ; %bb.0:
; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: flat_load_dword v0, v[0:1]
; GFX7-UNALIGNED-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_load_2xi16_align4:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v0, v[0:1], off
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: s_mov_b32 s4, 0xffff
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_bfi_b32 v1, v1, 0, v0
; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
%p.0 = load i16, i16 addrspace(1)* %p, align 4
%p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
%zext.0 = zext i16 %p.0 to i32
%zext.1 = zext i16 %p.1 to i32
%shl.1 = shl i32 %zext.1, 16
%or = or i32 %zext.0, %shl.1
ret i32 %or
}

; Should merge this to a dword store
define amdgpu_kernel void @global_store_2xi16_align4(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
; GFX7-LABEL: global_store_2xi16_align4:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-NEXT: v_mov_b32_e32 v2, 0x20001
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
; GFX7-NEXT: flat_store_dword v[0:1], v2
; GFX7-NEXT: s_endpgm
;
; GFX7-ALIGNED-LABEL: global_store_2xi16_align4:
; GFX7-ALIGNED: ; %bb.0:
; GFX7-ALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v2, 0x20001
; GFX7-ALIGNED-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-ALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-ALIGNED-NEXT: flat_store_dword v[0:1], v2
; GFX7-ALIGNED-NEXT: s_endpgm
;
; GFX7-UNALIGNED-LABEL: global_store_2xi16_align4:
; GFX7-UNALIGNED: ; %bb.0:
; GFX7-UNALIGNED-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v2, 0x20001
; GFX7-UNALIGNED-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v0, s0
; GFX7-UNALIGNED-NEXT: v_mov_b32_e32 v1, s1
; GFX7-UNALIGNED-NEXT: flat_store_dword v[0:1], v2
; GFX7-UNALIGNED-NEXT: s_endpgm
;
; GFX9-LABEL: global_store_2xi16_align4:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX9-NEXT: v_mov_b32_e32 v2, 0x20001
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
%gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
store i16 1, i16 addrspace(1)* %r, align 4
store i16 2, i16 addrspace(1)* %gep.r, align 2
ret void
}