[AMDGPU] Increased vector length for global/constant loads.
Summary: The GCN ISA provides instructions that can read 16 consecutive dwords from memory through the scalar data cache; the LoadStoreVectorizer should take advantage of the wider vector length and pack 16/8 elements of dwords/quadwords.

Author: FarhanaAleen

Reviewed By: rampitec

Subscribers: llvm-commits, AMDGPU

Differential Revision: https://reviews.llvm.org/D43275

llvm-svn: 325518
searlmc1 committed Feb 19, 2018
1 parent bc35f06 commit 419bdab
Showing 5 changed files with 105 additions and 3 deletions.
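
For context, the arithmetic behind the summary's "16/8 elements of dwords/quadwords": a single 512-bit scalar load (s_load_dwordx16) covers 16 dword-sized or 8 quadword-sized elements. A trivial sketch (not part of the change) spelling that out:

// Width arithmetic assumed by this change: one 512-bit scalar load holds
// 16 x 32-bit (dword) elements or 8 x 64-bit (quadword) elements.
static_assert(512 / 32 == 16, "dwords per 512-bit scalar load");
static_assert(512 / 64 == 8, "quadwords per 512-bit scalar load");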
30 changes: 28 additions & 2 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -233,12 +233,38 @@ unsigned AMDGPUTTIImpl::getMinVectorRegisterBitWidth() const {
   return 32;
 }
 
+unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                                            unsigned ChainSizeInBytes,
+                                            VectorType *VecTy) const {
+  unsigned VecRegBitWidth = VF * LoadSize;
+  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
+    // TODO: Support element-size less than 32bit?
+    return 128 / LoadSize;
+
+  return VF;
+}
+
+unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                             unsigned ChainSizeInBytes,
+                                             VectorType *VecTy) const {
+  unsigned VecRegBitWidth = VF * StoreSize;
+  if (VecRegBitWidth > 128)
+    return 128 / StoreSize;
+
+  return VF;
+}
+
 unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
   AMDGPUAS AS = ST->getAMDGPUAS();
   if (AddrSpace == AS.GLOBAL_ADDRESS ||
       AddrSpace == AS.CONSTANT_ADDRESS ||
-      AddrSpace == AS.CONSTANT_ADDRESS_32BIT ||
-      AddrSpace == AS.FLAT_ADDRESS)
+      AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
+    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+      return 128;
+    return 512;
+  }
+
+  if (AddrSpace == AS.FLAT_ADDRESS)
     return 128;
   if (AddrSpace == AS.LOCAL_ADDRESS ||
       AddrSpace == AS.REGION_ADDRESS)
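
How the new hooks behave (illustration, not part of the diff): getLoadStoreVecRegBitWidth now allows 512-bit vectors for the global/constant address spaces on GCN parts, while getLoadVectorFactor clamps chains of sub-dword elements back to 128 bits and getStoreVectorFactor keeps stores at 128 bits. A minimal standalone sketch of that arithmetic, assuming LoadSize/StoreSize are element sizes in bits (which is how the LoadStoreVectorizer appears to query these hooks):

#include <cassert>

// Illustration only -- mirrors the arithmetic of the new TTI hooks above.
// Assumes LoadSize/StoreSize are element sizes in bits.
static unsigned loadVectorFactor(unsigned VF, unsigned LoadSize,
                                 unsigned ScalarSizeInBits) {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && ScalarSizeInBits < 32)
    return 128 / LoadSize; // sub-dword chains stay capped at 128 bits
  return VF;
}

static unsigned storeVectorFactor(unsigned VF, unsigned StoreSize) {
  unsigned VecRegBitWidth = VF * StoreSize;
  if (VecRegBitWidth > 128)
    return 128 / StoreSize; // stores keep the 128-bit limit
  return VF;
}

int main() {
  assert(loadVectorFactor(16, 32, 32) == 16); // 16 dwords -> one s_load_dwordx16
  assert(loadVectorFactor(8, 64, 64) == 8);   // 8 quadwords -> 512 bits as well
  assert(loadVectorFactor(16, 16, 16) == 8);  // 16 x i16 clamped back to 8 elements
  assert(storeVectorFactor(16, 32) == 4);     // 16 x i32 store chain -> 4 elements
  return 0;
}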
6 changes: 6 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -118,6 +118,12 @@ class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
   unsigned getNumberOfRegisters(bool Vector) const;
   unsigned getRegisterBitWidth(bool Vector) const;
   unsigned getMinVectorRegisterBitWidth() const;
+  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                               unsigned ChainSizeInBytes,
+                               VectorType *VecTy) const;
+  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                unsigned ChainSizeInBytes,
+                                VectorType *VecTy) const;
   unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
 
   bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
37 changes: 37 additions & 0 deletions llvm/test/CodeGen/AMDGPU/load-constant-f32.ll
@@ -0,0 +1,37 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

; Tests whether a load chain of 8 constants gets vectorized into a wider load.
; FUNC-LABEL: {{^}}constant_load_v8f32:
; GCN: s_load_dwordx8
; EG: VTX_READ_128
; EG: VTX_READ_128
define amdgpu_kernel void @constant_load_v8f32(float addrspace(4)* noalias nocapture readonly %weights, float addrspace(1)* noalias nocapture %out_ptr) {
entry:
%out_ptr.promoted = load float, float addrspace(1)* %out_ptr, align 4
%tmp = load float, float addrspace(4)* %weights, align 4
%add = fadd float %tmp, %out_ptr.promoted
%arrayidx.1 = getelementptr inbounds float, float addrspace(4)* %weights, i64 1
%tmp1 = load float, float addrspace(4)* %arrayidx.1, align 4
%add.1 = fadd float %tmp1, %add
%arrayidx.2 = getelementptr inbounds float, float addrspace(4)* %weights, i64 2
%tmp2 = load float, float addrspace(4)* %arrayidx.2, align 4
%add.2 = fadd float %tmp2, %add.1
%arrayidx.3 = getelementptr inbounds float, float addrspace(4)* %weights, i64 3
%tmp3 = load float, float addrspace(4)* %arrayidx.3, align 4
%add.3 = fadd float %tmp3, %add.2
%arrayidx.4 = getelementptr inbounds float, float addrspace(4)* %weights, i64 4
%tmp4 = load float, float addrspace(4)* %arrayidx.4, align 4
%add.4 = fadd float %tmp4, %add.3
%arrayidx.5 = getelementptr inbounds float, float addrspace(4)* %weights, i64 5
%tmp5 = load float, float addrspace(4)* %arrayidx.5, align 4
%add.5 = fadd float %tmp5, %add.4
%arrayidx.6 = getelementptr inbounds float, float addrspace(4)* %weights, i64 6
%tmp6 = load float, float addrspace(4)* %arrayidx.6, align 4
%add.6 = fadd float %tmp6, %add.5
%arrayidx.7 = getelementptr inbounds float, float addrspace(4)* %weights, i64 7
%tmp7 = load float, float addrspace(4)* %arrayidx.7, align 4
%add.7 = fadd float %tmp7, %add.6
store float %add.7, float addrspace(1)* %out_ptr, align 4
ret void
}
33 changes: 33 additions & 0 deletions llvm/test/CodeGen/AMDGPU/load-constant-f64.ll
@@ -13,3 +13,36 @@ define amdgpu_kernel void @constant_load_f64(double addrspace(1)* %out, double a
}

attributes #0 = { nounwind }

; Tests whether a load chain of 8 constants of 64 bits each gets vectorized into a wider load.
; FUNC-LABEL: {{^}}constant_load_2v4f64:
; GCN: s_load_dwordx16
define amdgpu_kernel void @constant_load_2v4f64(double addrspace(4)* noalias nocapture readonly %weights, double addrspace(1)* noalias nocapture %out_ptr) {
entry:
%out_ptr.promoted = load double, double addrspace(1)* %out_ptr, align 4
%tmp = load double, double addrspace(4)* %weights, align 4
%add = fadd double %tmp, %out_ptr.promoted
%arrayidx.1 = getelementptr inbounds double, double addrspace(4)* %weights, i64 1
%tmp1 = load double, double addrspace(4)* %arrayidx.1, align 4
%add.1 = fadd double %tmp1, %add
%arrayidx.2 = getelementptr inbounds double, double addrspace(4)* %weights, i64 2
%tmp2 = load double, double addrspace(4)* %arrayidx.2, align 4
%add.2 = fadd double %tmp2, %add.1
%arrayidx.3 = getelementptr inbounds double, double addrspace(4)* %weights, i64 3
%tmp3 = load double, double addrspace(4)* %arrayidx.3, align 4
%add.3 = fadd double %tmp3, %add.2
%arrayidx.4 = getelementptr inbounds double, double addrspace(4)* %weights, i64 4
%tmp4 = load double, double addrspace(4)* %arrayidx.4, align 4
%add.4 = fadd double %tmp4, %add.3
%arrayidx.5 = getelementptr inbounds double, double addrspace(4)* %weights, i64 5
%tmp5 = load double, double addrspace(4)* %arrayidx.5, align 4
%add.5 = fadd double %tmp5, %add.4
%arrayidx.6 = getelementptr inbounds double, double addrspace(4)* %weights, i64 6
%tmp6 = load double, double addrspace(4)* %arrayidx.6, align 4
%add.6 = fadd double %tmp6, %add.5
%arrayidx.7 = getelementptr inbounds double, double addrspace(4)* %weights, i64 7
%tmp7 = load double, double addrspace(4)* %arrayidx.7, align 4
%add.7 = fadd double %tmp7, %add.6
store double %add.7, double addrspace(1)* %out_ptr, align 4
ret void
}
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AMDGPU/waitcnt-looptest.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -amdgpu-load-store-vectorizer=0 | FileCheck --check-prefix=GCN %s

; Check that the waitcnt insertion algorithm correctly propagates wait counts
; from before a loop to the loop header.
