Revert "[CGP] Add generic TargetLowering::shouldAlignPointerArgs() im…
Browse files Browse the repository at this point in the history
…plementation"

These commits are causing a test-suite build failure on AIX. Reverting for now to allow time to investigate.
https://lab.llvm.org/buildbot/#/builders/214/builds/5779/steps/9/logs/stdio

This reverts commits bd87a24 and 4c72266.
jakeegan committed Feb 14, 2023
1 parent 12b4f9e commit 08533f8
Showing 10 changed files with 59 additions and 138 deletions.
8 changes: 4 additions & 4 deletions llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1921,10 +1921,10 @@ class TargetLoweringBase {
/// the object whose address is being passed. If so then MinSize is set to the
/// minimum size the object must be to be aligned and PrefAlign is set to the
/// preferred alignment.
virtual bool
shouldUpdatePointerArgAlignment(const CallInst *CI, unsigned &MinSize,
Align &PrefAlign,
const TargetTransformInfo &TTI) const;
virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
Align & /*PrefAlign*/) const {
return false;
}

//===--------------------------------------------------------------------===//
/// \name Helpers for TargetTransformInfo implementations
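For reference, shouldAlignPointerArgs() (restored above with a default return of false) is the hook a backend overrides when it wants CodeGenPrepare to over-align objects passed to memory intrinsics. A minimal sketch of such an override follows; the MyTargetLowering class and the chosen constants are illustrative only and are not part of this commit:

bool MyTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                              Align &PrefAlign) const {
  // Only adjust alignment for memcpy/memmove/memset calls.
  // (MemIntrinsic requires #include "llvm/IR/IntrinsicInst.h".)
  if (!isa<MemIntrinsic>(CI))
    return false;
  MinSize = 8;          // Skip objects smaller than 8 bytes.
  PrefAlign = Align(8); // Ask CodeGenPrepare for 8-byte alignment.
  return true;
}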
4 changes: 2 additions & 2 deletions llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2221,10 +2221,10 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
}

// Align the pointer arguments to this call if the target thinks it's a good
// idea (generally only useful for memcpy/memmove/memset).
// idea
unsigned MinSize;
Align PrefAlign;
if (TLI->shouldUpdatePointerArgAlignment(CI, MinSize, PrefAlign, *TTI)) {
if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
for (auto &Arg : CI->args()) {
// We want to align both objects whose address is used directly and
// objects whose address is used in casts and GEPs, though it only makes
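For context, the loop that follows this check in CodeGenPrepare (truncated in the hunk above) consumes MinSize and PrefAlign roughly as sketched below. This is a simplified paraphrase rather than the verbatim source; in particular, the real code also accumulates constant GEP offsets before checking sizes:

if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
  for (Value *Arg : CI->args()) {
    // Look through casts and zero-offset GEPs to the underlying object.
    Value *Obj = Arg->stripPointerCasts();
    if (auto *AI = dyn_cast<AllocaInst>(Obj)) {
      // Over-align stack objects that are at least MinSize bytes.
      if (AI->getAlign() < PrefAlign &&
          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize)
        AI->setAlignment(PrefAlign);
    } else if (auto *GV = dyn_cast<GlobalVariable>(Obj)) {
      // Globals can only be over-aligned if they have a definition.
      if (GV->canIncreaseAlignment() &&
          GV->getPointerAlignment(*DL) < PrefAlign &&
          DL->getTypeAllocSize(GV->getValueType()) >= MinSize)
        GV->setAlignment(PrefAlign);
    }
  }
}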
37 changes: 0 additions & 37 deletions llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -42,7 +42,6 @@
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
@@ -949,42 +948,6 @@ bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool TargetLoweringBase::shouldUpdatePointerArgAlignment(
const CallInst *CI, unsigned &MinSize, Align &PrefAlign,
const TargetTransformInfo &TTI) const {
// For now, we only adjust alignment for memcpy/memmove/memset calls.
auto *MemCI = dyn_cast<MemIntrinsic>(CI);
if (!MemCI)
return false;
auto AddrSpace = MemCI->getDestAddressSpace();
// We assume that scalar register sized values can be loaded/stored
// efficiently. If this is not the case for a given target it should override
// this function.
auto PrefSizeBits =
TTI.getRegisterBitWidth(TargetTransformInfo::RGK_Scalar).getFixedValue();
PrefAlign = Align(PrefSizeBits / 8);
// When building with -Oz, we only increase the alignment if the object is
// at least 8 bytes in size to avoid increased stack/global padding.
// Otherwise, we require at least PrefAlign bytes to be copied.
MinSize = PrefAlign.value();
if (CI->getFunction()->hasMinSize())
MinSize = std::max(MinSize, 8u);

// XXX: we could determine the MachineMemOperand flags instead of assuming
// load+store (but it probably makes no difference for supported targets).
unsigned FastUnalignedAccess = 0;
if (allowsMisalignedMemoryAccesses(
LLT::scalar(PrefSizeBits), AddrSpace, Align(1),
MachineMemOperand::MOStore | MachineMemOperand::MOLoad,
&FastUnalignedAccess) &&
FastUnalignedAccess) {
// If unaligned loads&stores are fast, there is no need to adjust
// alignment.
return false;
}
return true; // unaligned accesses are not possible or slow.
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
// If the command-line option was specified, ignore this request.
if (!JumpIsExpensiveOverride.getNumOccurrences())
5 changes: 2 additions & 3 deletions llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1920,9 +1920,8 @@ ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
// memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldUpdatePointerArgAlignment(
const CallInst *CI, unsigned &MinSize, Align &PrefAlign,
const TargetTransformInfo &TTI) const {
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
Align &PrefAlign) const {
if (!isa<MemIntrinsic>(CI))
return false;
MinSize = 8;
5 changes: 2 additions & 3 deletions llvm/lib/Target/ARM/ARMISelLowering.h
@@ -572,9 +572,8 @@ class VectorType;
const TargetRegisterClass *
getRegClassFor(MVT VT, bool isDivergent = false) const override;

bool shouldUpdatePointerArgAlignment(
const CallInst *CI, unsigned &MinSize, Align &PrefAlign,
const TargetTransformInfo &TTI) const override;
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
Align &PrefAlign) const override;

/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
61 changes: 38 additions & 23 deletions llvm/test/CodeGen/RISCV/memcpy-inline.ll
@@ -295,35 +295,50 @@ entry:
}

define void @t6() nounwind {
; RV32-LABEL: t6:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lui a0, %hi(spool.splbuf)
; RV32-NEXT: li a1, 88
; RV32-NEXT: sh a1, %lo(spool.splbuf+12)(a0)
; RV32-NEXT: lui a1, 361862
; RV32-NEXT: addi a1, a1, -1960
; RV32-NEXT: sw a1, %lo(spool.splbuf+8)(a0)
; RV32-NEXT: lui a1, 362199
; RV32-NEXT: addi a1, a1, 559
; RV32-NEXT: sw a1, %lo(spool.splbuf+4)(a0)
; RV32-NEXT: lui a1, 460503
; RV32-NEXT: addi a1, a1, 1071
; RV32-NEXT: sw a1, %lo(spool.splbuf)(a0)
; RV32-NEXT: ret
; RV32ALIGNED-LABEL: t6:
; RV32ALIGNED: # %bb.0: # %entry
; RV32ALIGNED-NEXT: addi sp, sp, -16
; RV32ALIGNED-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ALIGNED-NEXT: lui a0, %hi(spool.splbuf)
; RV32ALIGNED-NEXT: addi a0, a0, %lo(spool.splbuf)
; RV32ALIGNED-NEXT: lui a1, %hi(.L.str6)
; RV32ALIGNED-NEXT: addi a1, a1, %lo(.L.str6)
; RV32ALIGNED-NEXT: li a2, 14
; RV32ALIGNED-NEXT: call memcpy@plt
; RV32ALIGNED-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ALIGNED-NEXT: addi sp, sp, 16
; RV32ALIGNED-NEXT: ret
;
; RV64ALIGNED-LABEL: t6:
; RV64ALIGNED: # %bb.0: # %entry
; RV64ALIGNED-NEXT: addi sp, sp, -16
; RV64ALIGNED-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ALIGNED-NEXT: lui a0, %hi(spool.splbuf)
; RV64ALIGNED-NEXT: li a1, 88
; RV64ALIGNED-NEXT: sh a1, %lo(spool.splbuf+12)(a0)
; RV64ALIGNED-NEXT: lui a1, %hi(.LCPI6_0)
; RV64ALIGNED-NEXT: ld a1, %lo(.LCPI6_0)(a1)
; RV64ALIGNED-NEXT: lui a2, 361862
; RV64ALIGNED-NEXT: addiw a2, a2, -1960
; RV64ALIGNED-NEXT: sw a2, %lo(spool.splbuf+8)(a0)
; RV64ALIGNED-NEXT: sd a1, %lo(spool.splbuf)(a0)
; RV64ALIGNED-NEXT: addi a0, a0, %lo(spool.splbuf)
; RV64ALIGNED-NEXT: lui a1, %hi(.L.str6)
; RV64ALIGNED-NEXT: addi a1, a1, %lo(.L.str6)
; RV64ALIGNED-NEXT: li a2, 14
; RV64ALIGNED-NEXT: call memcpy@plt
; RV64ALIGNED-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ALIGNED-NEXT: addi sp, sp, 16
; RV64ALIGNED-NEXT: ret
;
; RV32UNALIGNED-LABEL: t6:
; RV32UNALIGNED: # %bb.0: # %entry
; RV32UNALIGNED-NEXT: lui a0, %hi(spool.splbuf)
; RV32UNALIGNED-NEXT: li a1, 88
; RV32UNALIGNED-NEXT: sh a1, %lo(spool.splbuf+12)(a0)
; RV32UNALIGNED-NEXT: lui a1, 361862
; RV32UNALIGNED-NEXT: addi a1, a1, -1960
; RV32UNALIGNED-NEXT: sw a1, %lo(spool.splbuf+8)(a0)
; RV32UNALIGNED-NEXT: lui a1, 362199
; RV32UNALIGNED-NEXT: addi a1, a1, 559
; RV32UNALIGNED-NEXT: sw a1, %lo(spool.splbuf+4)(a0)
; RV32UNALIGNED-NEXT: lui a1, 460503
; RV32UNALIGNED-NEXT: addi a1, a1, 1071
; RV32UNALIGNED-NEXT: sw a1, %lo(spool.splbuf)(a0)
; RV32UNALIGNED-NEXT: ret
;
; RV64UNALIGNED-LABEL: t6:
; RV64UNALIGNED: # %bb.0: # %entry
; RV64UNALIGNED-NEXT: lui a0, %hi(.L.str6)
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/WebAssembly/bulk-memory.ll
@@ -154,7 +154,7 @@ define void @memset_1024(ptr %dest, i8 %val) {
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112
; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 8
; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12
; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]]
@@ -171,7 +171,7 @@ define void @memcpy_alloca_src(ptr %dst) {
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112
; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 8
; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12
; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]]
@@ -188,7 +188,7 @@ define void @memcpy_alloca_dst(ptr %src) {
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i32.const $push[[L1:[0-9]+]]=, 112
; BULK-MEM-NEXT: i32.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 8
; BULK-MEM-NEXT: i32.const $push[[L3:[0-9]+]]=, 12
; BULK-MEM-NEXT: i32.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
; BULK-MEM-NEXT: memory.fill 0, $pop[[L4]], $0, $pop[[L5]]
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
@@ -157,7 +157,7 @@ define void @memset_1024(ptr %dest, i8 %val) {
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 112
; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 8
; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12
; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]]
@@ -174,7 +174,7 @@ define void @memcpy_alloca_src(ptr %dst) {
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 112
; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 8
; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12
; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]]
@@ -191,7 +191,7 @@ define void @memcpy_alloca_dst(ptr %src) {
; BULK-MEM-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer
; BULK-MEM-NEXT: i64.const $push[[L1:[0-9]+]]=, 112
; BULK-MEM-NEXT: i64.sub $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]
; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 8
; BULK-MEM-NEXT: i64.const $push[[L3:[0-9]+]]=, 12
; BULK-MEM-NEXT: i64.add $push[[L4:[0-9]+]]=, $pop[[L2]], $pop[[L3]]
; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
; BULK-MEM-NEXT: memory.fill 0, $pop[[L4]], $0, $pop[[L5]]
@@ -134,7 +134,7 @@ define i64 @test_return_i2(i64 %i.coerce) {
; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.0, align 4)
; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.1, align 8), (load (s8) from %ir.2, align 8)
; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.1, align 4), (load (s8) from %ir.2, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.3, align 4)
; ALL: $rax = COPY [[LOAD]](s64)
; ALL: RET 0, implicit $rax
@@ -166,9 +166,9 @@ define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) {
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX2]], [[C1]](s64)
; ALL: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.1)
; ALL: G_MEMCPY [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64), 0 :: (store (s8) into %ir.2, align 8), (load (s8) from %ir.3, align 8)
; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.4, align 8), (load (s8) from %ir.5, align 8)
; ALL: G_MEMCPY [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64), 0 :: (store (s8) into %ir.6, align 8), (load (s8) from %ir.7, align 8)
; ALL: G_MEMCPY [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64), 0 :: (store (s8) into %ir.2, align 4), (load (s8) from %ir.3, align 4)
; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.4, align 4), (load (s8) from %ir.5, align 4)
; ALL: G_MEMCPY [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64), 0 :: (store (s8) into %ir.6, align 8), (load (s8) from %ir.7, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (dereferenceable load (s64) from %ir.tmp)
; ALL: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX3]], [[C1]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load (s32) from %ir.tmp + 8, align 8)
@@ -210,7 +210,7 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; ALL: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX1]], [[C1]](s64)
; ALL: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.2, align 4)
; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.3, align 8), (load (s8) from %ir.4, align 8)
; ALL: G_MEMCPY [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store (s8) into %ir.3, align 4), (load (s8) from %ir.4, align 4)
; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.5, align 4)
; ALL: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64)
; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (dereferenceable load (s64) from %ir.5 + 8, align 4)

This file was deleted.
