diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 09f59af065896b..3168aaa889953f 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -937,73 +937,73 @@ let OtherPredicates = [isGFX90APlus] in
 
 // Patterns for global loads with no offset.
 class FlatLoadPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (FlatOffset i64:$vaddr, i16:$offset))),
+  (vt (node (FlatOffset i64:$vaddr, i32:$offset))),
   (inst $vaddr, $offset)
 >;
 
 class FlatLoadPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (FlatOffset (i64 VReg_64:$vaddr), i16:$offset), vt:$in),
+  (node (FlatOffset (i64 VReg_64:$vaddr), i32:$offset), vt:$in),
   (inst $vaddr, $offset, 0, $in)
 >;
 
 class FlatSignedLoadPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (GlobalOffset (i64 VReg_64:$vaddr), i16:$offset), vt:$in),
+  (node (GlobalOffset (i64 VReg_64:$vaddr), i32:$offset), vt:$in),
   (inst $vaddr, $offset, 0, $in)
 >;
 
 class GlobalLoadSaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$in)),
+  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$in)),
   (inst $saddr, $voffset, $offset, 0, $in)
 >;
 
 class FlatLoadSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (GlobalOffset (i64 VReg_64:$vaddr), i16:$offset))),
+  (vt (node (GlobalOffset (i64 VReg_64:$vaddr), i32:$offset))),
   (inst $vaddr, $offset)
 >;
 
 class GlobalLoadSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset))),
+  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset))),
   (inst $saddr, $voffset, $offset, 0)
 >;
 
 class GlobalStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset)),
+  (node vt:$data, (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset)),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class GlobalAtomicStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$data),
+  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$data),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class GlobalAtomicSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt, ValueType data_vt = vt> : GCNPat <
-  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), data_vt:$data)),
+  (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), data_vt:$data)),
   (inst $voffset, getVregSrcForVT<data_vt>.ret:$data, $saddr, $offset)
 >;
 
 class GlobalAtomicNoRtnSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$data),
+  (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i32:$offset), vt:$data),
   (inst $voffset, getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class FlatStorePat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (FlatOffset i64:$vaddr, i16:$offset)),
+  (node vt:$data, (FlatOffset i64:$vaddr, i32:$offset)),
   (inst $vaddr, getVregSrcForVT<vt>.ret:$data, $offset)
 >;
 
 class FlatStoreSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (GlobalOffset i64:$vaddr, i16:$offset)),
+  (node vt:$data, (GlobalOffset i64:$vaddr, i32:$offset)),
   (inst $vaddr, getVregSrcForVT<vt>.ret:$data, $offset)
 >;
 
 class FlatStoreAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
   // atomic store follows atomic binop convention so the address comes
   // first.
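Every load/store pattern in the hunk above now types the matched offset leaf as i32 instead of i16. A plausible motivation (an assumption on my part; the diff itself only shows the type widening) is that an i16-sized operand cannot carry offsets needing more than 16 bits, which a wider signed offset field on newer subtargets would require. A minimal standalone C++ sketch of the truncation hazard, not taken from the patch:

```cpp
// Illustrative only: a 16-bit container silently mangles an 18-bit offset,
// while a 32-bit one preserves it. Values here are made up for the demo.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t Offset = 0x18000;                      // needs 18 signed bits
  int16_t Narrow = static_cast<int16_t>(Offset); // i16-sized storage
  int32_t Wide = Offset;                         // i32-sized storage
  printf("narrow=%d wide=%d\n", Narrow, Wide);   // narrow=-32768, wide=98304
  return Narrow == Offset ? 1 : 0;               // truncation loses the value
}
```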
-  (node (FlatOffset i64:$vaddr, i16:$offset), vt:$data),
+  (node (FlatOffset i64:$vaddr, i32:$offset), vt:$data),
   (inst $vaddr, getVregSrcForVT<vt>.ret:$data, $offset)
 >;
 
@@ -1011,7 +1011,7 @@ class FlatStoreSignedAtomicPat <FLAT_Pseudo inst, SDPatternOperator node,
                                 ValueType vt, ValueType data_vt = vt> : GCNPat <
   // atomic store follows atomic binop convention so the address comes
   // first.
-  (node (GlobalOffset i64:$vaddr, i16:$offset), data_vt:$data),
+  (node (GlobalOffset i64:$vaddr, i32:$offset), data_vt:$data),
   (inst $vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)
 >;
 
@@ -1020,17 +1020,17 @@ multiclass FlatAtomicPat <string inst, string node, ValueType vt,
                           ValueType data_vt = vt> {
   defvar rtnNode = !cast<PatFrags>(node#"_"#vt.Size);
   defvar noRtnNode = !cast<PatFrags>(node#"_noret_"#vt.Size);
 
-  def : GCNPat <(vt (rtnNode (FlatOffset i64:$vaddr, i16:$offset), data_vt:$data)),
+  def : GCNPat <(vt (rtnNode (FlatOffset i64:$vaddr, i32:$offset), data_vt:$data)),
     (!cast<FLAT_Pseudo>(inst#"_RTN") VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
 
   let AddedComplexity = 1 in
-  def : GCNPat <(vt (noRtnNode (FlatOffset i64:$vaddr, i16:$offset), data_vt:$data)),
+  def : GCNPat <(vt (noRtnNode (FlatOffset i64:$vaddr, i32:$offset), data_vt:$data)),
     (!cast<FLAT_Pseudo>(inst) VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)>;
 }
 
 class FlatSignedAtomicPatBase <FLAT_Pseudo inst, SDPatternOperator node,
                                ValueType vt, ValueType data_vt = vt> : GCNPat <
-  (vt (node (GlobalOffset i64:$vaddr, i16:$offset), data_vt:$data)),
+  (vt (node (GlobalOffset i64:$vaddr, i32:$offset), data_vt:$data)),
   (inst VReg_64:$vaddr, getVregSrcForVT<data_vt>.ret:$data, $offset)
 >;
 
@@ -1063,49 +1063,49 @@ multiclass FlatSignedAtomicPatWithAddrSpace <string inst, string intr,
 
 class ScratchLoadSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset))),
+  (vt (node (ScratchOffset (i32 VGPR_32:$vaddr), i32:$offset))),
   (inst $vaddr, $offset)
 >;
 
 class ScratchLoadSignedPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset), vt:$in),
+  (node (ScratchOffset (i32 VGPR_32:$vaddr), i32:$offset), vt:$in),
   (inst $vaddr, $offset, 0, $in)
 >;
 
 class ScratchStoreSignedPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset)),
+  (node vt:$data, (ScratchOffset (i32 VGPR_32:$vaddr), i32:$offset)),
   (inst getVregSrcForVT<vt>.ret:$data, $vaddr, $offset)
 >;
 
 class ScratchLoadSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset))),
+  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i32:$offset))),
   (inst $saddr, $offset)
 >;
 
 class ScratchLoadSaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset), vt:$in)),
+  (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i32:$offset), vt:$in)),
   (inst $saddr, $offset, 0, $in)
 >;
 
 class ScratchStoreSaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset)),
+  (node vt:$data, (ScratchSAddr (i32 SGPR_32:$saddr), i32:$offset)),
   (inst getVregSrcForVT<vt>.ret:$data, $saddr, $offset)
 >;
 
 class ScratchLoadSVaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i16:$offset))),
+  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i32:$offset))),
   (inst $vaddr, $saddr, $offset, 0)
 >;
 
 class ScratchStoreSVaddrPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (node vt:$data, (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i16:$offset)),
+  (node vt:$data, (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i32:$offset)),
   (inst getVregSrcForVT<vt>.ret:$data, $vaddr, $saddr, $offset)
 >;
 
 class ScratchLoadSVaddrPat_D16 <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : GCNPat <
-  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i16:$offset), vt:$in)),
+  (vt (node (ScratchSVAddr (i32 VGPR_32:$vaddr), (i32 SGPR_32:$saddr), i32:$offset), vt:$in)),
   (inst $vaddr, $saddr, $offset, 0, $in)
 >;
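All of these addressing-mode patterns (FlatOffset, GlobalOffset, GlobalSAddr, ScratchOffset, ScratchSAddr, ScratchSVAddr) bind an immediate that the selector has already proven to fit the subtarget's signed offset field; the i16-to-i32 change only widens the container that immediate travels in. A hand-rolled sketch of that base-plus-immediate split under a signed width limit; the helper below is hypothetical and only approximates the shape of the backend's offset-folding logic, it is not the selector's code:

```cpp
// Hypothetical sketch: fold ptr+const into (base, imm) when the constant
// fits a signed NumBits field, otherwise keep the add in the base address.
#include <cstdint>
#include <cstdio>

struct AddrSplit {
  int64_t Base; // would live in $vaddr / $saddr
  int32_t Imm;  // would become the $offset operand
};

static AddrSplit splitConstOffset(int64_t Base, int64_t Off, unsigned NumBits) {
  int64_t Min = -(int64_t(1) << (NumBits - 1));
  int64_t Max = (int64_t(1) << (NumBits - 1)) - 1;
  if (Off >= Min && Off <= Max)
    return {Base, static_cast<int32_t>(Off)}; // fold into the immediate
  return {Base + Off, 0};                     // too wide: keep it in the base
}

int main() {
  AddrSplit A = splitConstOffset(0x1000, 4096, /*NumBits=*/13); // out of range
  AddrSplit B = splitConstOffset(0x1000, 4095, /*NumBits=*/13); // folds
  printf("A: base=%#llx imm=%d\n", (unsigned long long)A.Base, A.Imm);
  printf("B: base=%#llx imm=%d\n", (unsigned long long)B.Base, B.Imm);
  return 0;
}
```

With a 13-bit field the +4096 case cannot fold; a subtarget with a wider field (and hence the wider i32 operand) would accept it.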
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index e465267f2c2073..b9754062201f5a 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -141,15 +141,10 @@ void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
     bool IsFlatSeg = !(Desc.TSFlags &
                        (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch));
 
-    if (IsFlatSeg) { // Unsigned offset
+    if (IsFlatSeg) // Unsigned offset
       printU16ImmDecOperand(MI, OpNo, O);
-    } else { // Signed offset
-      if (AMDGPU::isGFX10(STI)) {
-        O << formatDec(SignExtend32<12>(MI->getOperand(OpNo).getImm()));
-      } else {
-        O << formatDec(SignExtend32<13>(MI->getOperand(OpNo).getImm()));
-      }
-    }
+    else // Signed offset
+      O << formatDec(SignExtend32(Imm, AMDGPU::getNumFlatOffsetBits(STI)));
   }
 }
 
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 2066abb0268d87..0fb39c5324965d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1229,7 +1229,7 @@ class NamedOperandU32Default1 <string Name, AsmOperandClass MatchClass> :
 
 let OperandType = "OPERAND_IMMEDIATE" in {
 
-def flat_offset : CustomOperand<i16, 1, "FlatOffset">;
+def flat_offset : CustomOperand<i32, 1, "FlatOffset">;
 def offset : NamedIntOperand<i16, "offset">;
 def offset0 : NamedIntOperand<i8, "offset0">;
 def offset1 : NamedIntOperand<i8, "offset1">;
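The printer change replaces the hard-coded SignExtend32<12>/SignExtend32<13> branches with a single call whose width comes from AMDGPU::getNumFlatOffsetBits(STI). A standalone demo of what that computes, using a local stand-in for llvm::SignExtend32 (the 12- and 13-bit widths are the ones the removed code used; the per-subtarget value comes from the helper):

```cpp
// Standalone demo of variable-width sign extension as used by the new
// printer path; signExtend32 mirrors llvm::SignExtend32(X, B).
#include <cstdint>
#include <cstdio>

static int32_t signExtend32(uint32_t X, unsigned B) {
  // Shift the B-bit field up to the top of the word, then arithmetic-shift
  // back down so its top bit becomes the sign bit.
  return static_cast<int32_t>(X << (32u - B)) >> (32u - B);
}

int main() {
  uint32_t Raw = 0x800; // encoded offset field with bit 11 set
  printf("12-bit field: %d\n", signExtend32(Raw, 12)); // -2048 (sign bit set)
  printf("13-bit field: %d\n", signExtend32(Raw, 13)); //  2048 (positive)
  return 0;
}
```

Centralizing the width in one subtarget query means a new generation only has to update getNumFlatOffsetBits instead of growing the if-chain in the printer, and the same query can back range checks elsewhere in the backend.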