[AMDGPU] Trim zero components from buffer and image stores
For image and buffer stores, the default behaviour on GFX11 and older
is to zero-fill all components that are not written. Passing only the
X component is therefore equivalent to passing X000, and passing XY is
equivalent to XY00.

This patch simplifies the stored vector of components in InstCombine
by removing components from the end that are known to be zero.
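
For example (an illustrative sketch in the spirit of the tests added by
this patch; the function name and operand values below are invented, not
taken from the patch):

  ; Before: all four components are passed, but Y, Z and W are known zeros.
  define amdgpu_ps void @store_x_only(<4 x i32> inreg %rsrc, float %x) {
    %data = insertelement <4 x float> zeroinitializer, float %x, i64 0
    call void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float> %data, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
    ret void
  }
  declare void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32 immarg)

  ; After InstCombine: only the X component is stored; the hardware
  ; zero-fills Y, Z and W.
  ;   call void @llvm.amdgcn.raw.buffer.store.f32(float %x, <4 x i32> %rsrc, i32 0, i32 0, i32 0)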

For image stores, it also trims the DMask where necessary.
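
Likewise for image stores, the dmask immediate (the operand right after
vdata) must stay in sync with the trimmed vector (again an illustrative
sketch, not a verbatim test from the patch):

  ; Before: <4 x float> vdata with dmask = 15, but only X is nonzero.
  define amdgpu_ps void @image_store_x_only(<8 x i32> inreg %rsrc, i32 %s, i32 %t, float %x) {
    %data = insertelement <4 x float> zeroinitializer, float %x, i64 0
    call void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float> %data, i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
    ret void
  }
  declare void @llvm.amdgcn.image.store.2d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg)

  ; After InstCombine: a scalar store with the dmask trimmed to 1 (X only).
  ;   call void @llvm.amdgcn.image.store.2d.f32.i32(float %x, i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)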

Reviewed By: foad, arsenm
Differential Revision: https://reviews.llvm.org/D146737
Mateja Marjanovic committed May 15, 2023
1 parent 73668cc commit 3181a6e
Showing 4 changed files with 232 additions and 41 deletions.
6 changes: 4 additions & 2 deletions llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -872,10 +872,12 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
 
   defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
               "STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
-              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>;
+              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>,
+              AMDGPUImageDMaskIntrinsic;
   defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
               "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
-              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>;
+              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>,
+              AMDGPUImageDMaskIntrinsic;
 
   //////////////////////////////////////////////////////////////////////////
   // MSAA intrinsics
131 changes: 108 additions & 23 deletions llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -355,6 +355,36 @@ bool GCNTTIImpl::canSimplifyLegacyMulToMul(const Instruction &I,
   return false;
 }
 
+// Trim all zero components from the end of the vector \p UseV and return
+// an appropriate bitset with known elements.
+static APInt trimTrailingZerosInVector(InstCombiner &IC, Value *UseV,
+                                       Instruction *I) {
+  auto *VTy = cast<FixedVectorType>(UseV->getType());
+  unsigned VWidth = VTy->getNumElements();
+  APInt DemandedElts = APInt::getAllOnes(VWidth);
+
+  for (int i = VWidth - 1; i >= 0; --i) {
+    APInt DemandOneElt = APInt::getOneBitSet(VWidth, i);
+    KnownFPClass KnownFPClass =
+        computeKnownFPClass(UseV, DemandOneElt, IC.getDataLayout(),
+                            /*InterestedClasses=*/fcAllFlags,
+                            /*Depth=*/0, &IC.getTargetLibraryInfo(),
+                            &IC.getAssumptionCache(), I,
+                            &IC.getDominatorTree(),
+                            &IC.getOptimizationRemarkEmitter());
+    if (KnownFPClass.KnownFPClasses != fcPosZero)
+      break;
+    DemandedElts.clearBit(i);
+  }
+  return DemandedElts;
+}
+
+static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
+                                                    IntrinsicInst &II,
+                                                    APInt DemandedElts,
+                                                    int DMaskIdx = -1,
+                                                    bool IsLoad = true);
+
 std::optional<Instruction *>
 GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
   Intrinsic::ID IID = II.getIntrinsicID();
@@ -1054,26 +1084,65 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
       return IC.replaceInstUsesWith(II, ConstantInt::getFalse(II.getType()));
     break;
   }
-  default: {
-    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
-            AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
-      return simplifyAMDGCNImageIntrinsic(ST, ImageDimIntr, II, IC);
+  case Intrinsic::amdgcn_buffer_store:
+  case Intrinsic::amdgcn_buffer_store_format:
+  case Intrinsic::amdgcn_raw_buffer_store:
+  case Intrinsic::amdgcn_raw_buffer_store_format:
+  case Intrinsic::amdgcn_raw_tbuffer_store:
+  case Intrinsic::amdgcn_struct_buffer_store:
+  case Intrinsic::amdgcn_struct_buffer_store_format:
+  case Intrinsic::amdgcn_struct_tbuffer_store:
+  case Intrinsic::amdgcn_tbuffer_store:
+  case Intrinsic::amdgcn_image_store_1d:
+  case Intrinsic::amdgcn_image_store_1darray:
+  case Intrinsic::amdgcn_image_store_2d:
+  case Intrinsic::amdgcn_image_store_2darray:
+  case Intrinsic::amdgcn_image_store_2darraymsaa:
+  case Intrinsic::amdgcn_image_store_2dmsaa:
+  case Intrinsic::amdgcn_image_store_3d:
+  case Intrinsic::amdgcn_image_store_cube:
+  case Intrinsic::amdgcn_image_store_mip_1d:
+  case Intrinsic::amdgcn_image_store_mip_1darray:
+  case Intrinsic::amdgcn_image_store_mip_2d:
+  case Intrinsic::amdgcn_image_store_mip_2darray:
+  case Intrinsic::amdgcn_image_store_mip_3d:
+  case Intrinsic::amdgcn_image_store_mip_cube: {
+    if (!isa<FixedVectorType>(II.getArgOperand(0)->getType()))
+      break;
+
+    APInt DemandedElts =
+        trimTrailingZerosInVector(IC, II.getArgOperand(0), &II);
+
+    int DMaskIdx = getAMDGPUImageDMaskIntrinsic(II.getIntrinsicID()) ? 1 : -1;
+    if (simplifyAMDGCNMemoryIntrinsicDemanded(IC, II, DemandedElts, DMaskIdx,
+                                              false)) {
+      return IC.eraseInstFromFunction(II);
     }
+
+    break;
   }
   }
+  if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
+          AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
+    return simplifyAMDGCNImageIntrinsic(ST, ImageDimIntr, II, IC);
+  }
   return std::nullopt;
 }
 
 /// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
 ///
+/// The result of simplifying amdgcn image and buffer store intrinsics is updating
+/// definitions of the intrinsics vector argument, not Uses of the result like
+/// image and buffer loads.
 /// Note: This only supports non-TFE/LWE image intrinsic calls; those have
 /// struct returns.
 static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
                                                     IntrinsicInst &II,
                                                     APInt DemandedElts,
-                                                    int DMaskIdx = -1) {
+                                                    int DMaskIdx, bool IsLoad) {
 
-  auto *IIVTy = cast<FixedVectorType>(II.getType());
+  auto *IIVTy = cast<FixedVectorType>(IsLoad ? II.getType()
+                                             : II.getOperand(0)->getType());
   unsigned VWidth = IIVTy->getNumElements();
   if (VWidth == 1)
     return nullptr;
@@ -1144,13 +1213,13 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
   DemandedElts &= (1 << llvm::popcount(DMaskVal)) - 1;
 
   unsigned NewDMaskVal = 0;
-  unsigned OrigLoadIdx = 0;
+  unsigned OrigLdStIdx = 0;
   for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
     const unsigned Bit = 1 << SrcIdx;
     if (!!(DMaskVal & Bit)) {
-      if (!!DemandedElts[OrigLoadIdx])
+      if (!!DemandedElts[OrigLdStIdx])
         NewDMaskVal |= Bit;
-      OrigLoadIdx++;
+      OrigLdStIdx++;
     }
   }
 
@@ -1178,29 +1247,45 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
       (NewNumElts == 1) ? EltTy : FixedVectorType::get(EltTy, NewNumElts);
   OverloadTys[0] = NewTy;
 
+  if (!IsLoad) {
+    SmallVector<int, 8> EltMask;
+    for (unsigned OrigStoreIdx = 0; OrigStoreIdx < VWidth; ++OrigStoreIdx)
+      if (DemandedElts[OrigStoreIdx])
+        EltMask.push_back(OrigStoreIdx);
+
+    if (NewNumElts == 1)
+      Args[0] = IC.Builder.CreateExtractElement(II.getOperand(0), EltMask[0]);
+    else
+      Args[0] = IC.Builder.CreateShuffleVector(II.getOperand(0), EltMask);
+  }
+
   Function *NewIntrin = Intrinsic::getDeclaration(
       II.getModule(), II.getIntrinsicID(), OverloadTys);
   CallInst *NewCall = IC.Builder.CreateCall(NewIntrin, Args);
   NewCall->takeName(&II);
   NewCall->copyMetadata(II);
 
-  if (NewNumElts == 1) {
-    return IC.Builder.CreateInsertElement(UndefValue::get(IIVTy), NewCall,
-                                          DemandedElts.countr_zero());
-  }
+  if (IsLoad) {
+    if (NewNumElts == 1) {
+      return IC.Builder.CreateInsertElement(UndefValue::get(IIVTy), NewCall,
+                                            DemandedElts.countr_zero());
+    }
 
-  SmallVector<int, 8> EltMask;
-  unsigned NewLoadIdx = 0;
-  for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
-    if (!!DemandedElts[OrigLoadIdx])
-      EltMask.push_back(NewLoadIdx++);
-    else
-      EltMask.push_back(NewNumElts);
-  }
+    SmallVector<int, 8> EltMask;
+    unsigned NewLoadIdx = 0;
+    for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
+      if (!!DemandedElts[OrigLoadIdx])
+        EltMask.push_back(NewLoadIdx++);
+      else
+        EltMask.push_back(NewNumElts);
+    }
 
-  Value *Shuffle = IC.Builder.CreateShuffleVector(NewCall, EltMask);
+    auto *Shuffle = IC.Builder.CreateShuffleVector(NewCall, EltMask);
 
-  return Shuffle;
+    return Shuffle;
+  }
+
+  return NewCall;
 }
 
 std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
32 changes: 16 additions & 16 deletions llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
@@ -66,7 +66,7 @@ define double @test_constant_fold_rcp_f64_43() nounwind {
 
 define float @test_constant_fold_rcp_f32_43_strictfp() nounwind strictfp {
 ; CHECK-LABEL: @test_constant_fold_rcp_f32_43_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.rcp.f32(float 4.300000e+01) #[[ATTR14:[0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.rcp.f32(float 4.300000e+01) #[[ATTR13:[0-9]+]]
 ; CHECK-NEXT: ret float [[VAL]]
 ;
   %val = call float @llvm.amdgcn.rcp.f32(float 4.300000e+01) strictfp nounwind readnone
@@ -107,7 +107,7 @@ define double @test_constant_fold_sqrt_f64_undef() nounwind {
 
 define half @test_constant_fold_sqrt_f16_0() nounwind {
 ; CHECK-LABEL: @test_constant_fold_sqrt_f16_0(
-; CHECK-NEXT: [[VAL:%.*]] = call half @llvm.amdgcn.sqrt.f16(half 0xH0000) #[[ATTR15:[0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = call half @llvm.amdgcn.sqrt.f16(half 0xH0000) #[[ATTR14:[0-9]+]]
 ; CHECK-NEXT: ret half [[VAL]]
 ;
   %val = call half @llvm.amdgcn.sqrt.f16(half 0.0) nounwind readnone
@@ -116,7 +116,7 @@ define half @test_constant_fold_sqrt_f16_0() nounwind {
 
 define float @test_constant_fold_sqrt_f32_0() nounwind {
 ; CHECK-LABEL: @test_constant_fold_sqrt_f32_0(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float 0.000000e+00) #[[ATTR14]]
 ; CHECK-NEXT: ret float [[VAL]]
 ;
   %val = call float @llvm.amdgcn.sqrt.f32(float 0.0) nounwind readnone
@@ -125,7 +125,7 @@ define float @test_constant_fold_sqrt_f32_0() nounwind {
 
 define double @test_constant_fold_sqrt_f64_0() nounwind {
 ; CHECK-LABEL: @test_constant_fold_sqrt_f64_0(
-; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double 0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double 0.000000e+00) #[[ATTR14]]
 ; CHECK-NEXT: ret double [[VAL]]
 ;
   %val = call double @llvm.amdgcn.sqrt.f64(double 0.0) nounwind readnone
@@ -134,7 +134,7 @@ define double @test_constant_fold_sqrt_f64_0() nounwind {
 
 define half @test_constant_fold_sqrt_f16_neg0() nounwind {
 ; CHECK-LABEL: @test_constant_fold_sqrt_f16_neg0(
-; CHECK-NEXT: [[VAL:%.*]] = call half @llvm.amdgcn.sqrt.f16(half 0xH8000) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call half @llvm.amdgcn.sqrt.f16(half 0xH8000) #[[ATTR14]]
 ; CHECK-NEXT: ret half [[VAL]]
 ;
   %val = call half @llvm.amdgcn.sqrt.f16(half -0.0) nounwind readnone
@@ -143,7 +143,7 @@ define half @test_constant_fold_sqrt_f16_neg0() nounwind {
 
 define float @test_constant_fold_sqrt_f32_neg0() nounwind {
 ; CHECK-LABEL: @test_constant_fold_sqrt_f32_neg0(
-; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float -0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.amdgcn.sqrt.f32(float -0.000000e+00) #[[ATTR14]]
 ; CHECK-NEXT: ret float [[VAL]]
 ;
   %val = call float @llvm.amdgcn.sqrt.f32(float -0.0) nounwind readnone
@@ -152,7 +152,7 @@ define float @test_constant_fold_sqrt_f32_neg0() nounwind {
 
 define double @test_constant_fold_sqrt_f64_neg0() nounwind {
 ; CHECK-LABEL: @test_constant_fold_sqrt_f64_neg0(
-; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double -0.000000e+00) #[[ATTR15]]
+; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.sqrt.f64(double -0.000000e+00) #[[ATTR14]]
 ; CHECK-NEXT: ret double [[VAL]]
 ;
   %val = call double @llvm.amdgcn.sqrt.f64(double -0.0) nounwind readnone
@@ -644,7 +644,7 @@ define i1 @test_class_isnan_f32(float %x) nounwind {
 
 define i1 @test_class_isnan_f32_strict(float %x) nounwind {
 ; CHECK-LABEL: @test_class_isnan_f32_strict(
-; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.amdgcn.class.f32(float [[X:%.*]], i32 3) #[[ATTR16:[0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.amdgcn.class.f32(float [[X:%.*]], i32 3) #[[ATTR15:[0-9]+]]
 ; CHECK-NEXT: ret i1 [[VAL]]
 ;
   %val = call i1 @llvm.amdgcn.class.f32(float %x, i32 3) strictfp
@@ -662,7 +662,7 @@ define i1 @test_class_is_p0_n0_f32(float %x) nounwind {
 
 define i1 @test_class_is_p0_n0_f32_strict(float %x) nounwind {
 ; CHECK-LABEL: @test_class_is_p0_n0_f32_strict(
-; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.amdgcn.class.f32(float [[X:%.*]], i32 96) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.amdgcn.class.f32(float [[X:%.*]], i32 96) #[[ATTR15]]
 ; CHECK-NEXT: ret i1 [[VAL]]
 ;
   %val = call i1 @llvm.amdgcn.class.f32(float %x, i32 96) strictfp
@@ -1275,8 +1275,8 @@ define i32 @ubfe_offset_0_width_0(i32 %src) {
 
 define i32 @ubfe_offset_0_width_3(i32 %src) {
 ; CHECK-LABEL: @ubfe_offset_0_width_3(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[SRC:%.*]], 7
-; CHECK-NEXT: ret i32 [[TMP1]]
+; CHECK-NEXT: [[BFE:%.*]] = and i32 [[SRC:%.*]], 7
+; CHECK-NEXT: ret i32 [[BFE]]
 ;
   %bfe = call i32 @llvm.amdgcn.ubfe.i32(i32 %src, i32 0, i32 3)
   ret i32 %bfe
@@ -1793,7 +1793,7 @@ define i64 @icmp_constant_inputs_false() {
 
 define i64 @icmp_constant_inputs_true() {
 ; CHECK-LABEL: @icmp_constant_inputs_true(
-; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0:![0-9]+]]) #[[ATTR17:[0-9]+]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0:![0-9]+]]) #[[ATTR16:[0-9]+]]
 ; CHECK-NEXT: ret i64 [[RESULT]]
 ;
   %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 9, i32 8, i32 34)
@@ -2500,7 +2500,7 @@ define i64 @fcmp_constant_inputs_false() {
 
 define i64 @fcmp_constant_inputs_true() {
 ; CHECK-LABEL: @fcmp_constant_inputs_true(
-; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0]]) #[[ATTR17]]
+; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0]]) #[[ATTR16]]
 ; CHECK-NEXT: ret i64 [[RESULT]]
 ;
   %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float 2.0, float 4.0, i32 4)
@@ -2542,7 +2542,7 @@ define i64 @ballot_zero_64() {
 
 define i64 @ballot_one_64() {
 ; CHECK-LABEL: @ballot_one_64(
-; CHECK-NEXT: [[B:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0]]) #[[ATTR17]]
+; CHECK-NEXT: [[B:%.*]] = call i64 @llvm.read_register.i64(metadata [[META0]]) #[[ATTR16]]
 ; CHECK-NEXT: ret i64 [[B]]
 ;
   %b = call i64 @llvm.amdgcn.ballot.i64(i1 1)
@@ -2568,7 +2568,7 @@ define i32 @ballot_zero_32() {
 
 define i32 @ballot_one_32() {
 ; CHECK-LABEL: @ballot_one_32(
-; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.read_register.i32(metadata [[META1:![0-9]+]]) #[[ATTR17]]
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.read_register.i32(metadata [[META1:![0-9]+]]) #[[ATTR16]]
 ; CHECK-NEXT: ret i32 [[B]]
 ;
   %b = call i32 @llvm.amdgcn.ballot.i32(i1 1)
@@ -5586,7 +5586,7 @@ define double @trig_preop_constfold() {
 
 define double @trig_preop_constfold_strictfp() {
 ; CHECK-LABEL: @trig_preop_constfold_strictfp(
-; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.trig.preop.f64(double 3.454350e+02, i32 5) #[[ATTR16]]
+; CHECK-NEXT: [[VAL:%.*]] = call double @llvm.amdgcn.trig.preop.f64(double 3.454350e+02, i32 5) #[[ATTR15]]
 ; CHECK-NEXT: ret double [[VAL]]
 ;
   %val = call double @llvm.amdgcn.trig.preop.f64(double 3.454350e+02, i32 5) strictfp
