Skip to content

Commit

Permalink
[ARM][LowOverheadLoops] Update liveness info
Browse files Browse the repository at this point in the history
After expanding the pseudo instructions, update the liveness info.
We do this in a post-order traversal of the loop, including its
exit blocks and preheader(s).

Differential Revision: https://reviews.llvm.org/D72131
  • Loading branch information
sparker-arm committed Jan 9, 2020
1 parent 2d515e4 commit e93e0d4
Show file tree
Hide file tree
Showing 15 changed files with 180 additions and 108 deletions.
13 changes: 13 additions & 0 deletions llvm/lib/CodeGen/LivePhysRegs.cpp
Expand Up @@ -276,6 +276,7 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) {
const MachineFunction &MF = *MBB.getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
const MachineFrameInfo &MFI = MF.getFrameInfo();

// We walk through the block backwards and start with the live outs.
LivePhysRegs LiveRegs;
Expand All @@ -294,6 +295,18 @@ void llvm::recomputeLivenessFlags(MachineBasicBlock &MBB) {
assert(Register::isPhysicalRegister(Reg));

bool IsNotLive = LiveRegs.available(MRI, Reg);

// Special-case return instructions for cases when a return is not
// the last instruction in the block.
if (MI.isReturn() && MFI.isCalleeSavedInfoValid()) {
for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) {
if (Info.getReg() == Reg) {
IsNotLive = !Info.isRestored();
break;
}
}
}

MO->setIsDead(IsNotLive);
}

Expand Down
64 changes: 64 additions & 0 deletions llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
Expand Up @@ -43,6 +43,61 @@ using namespace llvm;

namespace {

// Computes an instruction-free traversal order over a machine loop plus the
// surrounding blocks whose liveness the loop transformation may have
// invalidated: the loop's exit blocks, the loop body in post-order, and the
// preheader together with its single-predecessor chain.
class PostOrderLoopTraversal {
  MachineLoop &ML;
  MachineLoopInfo &MLI;
  // Blocks already emitted into Order; also guards against revisiting
  // blocks while recursing.
  SmallPtrSet<MachineBasicBlock*, 4> Visited;
  // The computed visitation order: exits, then loop body (post-order),
  // then preheader chain.
  SmallVector<MachineBasicBlock*, 4> Order;

public:
  PostOrderLoopTraversal(MachineLoop &ML, MachineLoopInfo &MLI)
    : ML(ML), MLI(MLI) { }

  /// Returns the blocks collected by ProcessLoop, in visitation order.
  const SmallVectorImpl<MachineBasicBlock*> &getOrder() const {
    return Order;
  }

  // Visit all the blocks within the loop, as well as exit blocks and any
  // blocks properly dominating the header.
  void ProcessLoop() {
    // Depth-first post-order walk of the loop body. Successors outside the
    // loop are ignored; exit blocks are added separately below.
    std::function<void(MachineBasicBlock*)> Search = [this, &Search]
      (MachineBasicBlock *MBB) -> void {
      if (!Visited.insert(MBB).second)
        return;

      for (auto *Succ : MBB->successors()) {
        if (!ML.contains(Succ))
          continue;
        Search(Succ);
      }
      Order.push_back(MBB);
    };

    // Insert exit blocks.
    SmallVector<MachineBasicBlock*, 2> ExitBlocks;
    ML.getExitBlocks(ExitBlocks);
    for (auto *MBB : ExitBlocks)
      Order.push_back(MBB);

    // Then add the loop body.
    Search(ML.getHeader());

    // Then try the preheader and its predecessors. Walk up the
    // single-predecessor chain; the Visited check prevents infinite
    // recursion if the chain contains a cycle (e.g. a self-looping block)
    // and avoids re-adding blocks already emitted from the loop body.
    std::function<void(MachineBasicBlock*)> GetPredecessor =
      [this, &GetPredecessor] (MachineBasicBlock *MBB) -> void {
      if (!Visited.insert(MBB).second)
        return;
      Order.push_back(MBB);
      if (MBB->pred_size() == 1)
        GetPredecessor(*MBB->pred_begin());
    };

    if (auto *Preheader = ML.getLoopPreheader())
      GetPredecessor(Preheader);
    else if (auto *Preheader = MLI.findLoopPreheader(&ML, true))
      GetPredecessor(Preheader);
  }
};

struct PredicatedMI {
MachineInstr *MI = nullptr;
SetVector<MachineInstr*> Predicates;
Expand Down Expand Up @@ -976,6 +1031,15 @@ void ARMLowOverheadLoops::Expand(LowOverheadLoop &LoLoop) {
ConvertVPTBlocks(LoLoop);
}
}

PostOrderLoopTraversal DFS(*LoLoop.ML, *MLI);
DFS.ProcessLoop();
const SmallVectorImpl<MachineBasicBlock*> &PostOrder = DFS.getOrder();
for (auto *MBB : PostOrder)
recomputeLiveIns(*MBB);

for (auto *MBB : reverse(PostOrder))
recomputeLivenessFlags(*MBB);
}

bool ARMLowOverheadLoops::RevertNonLoops() {
Expand Down
99 changes: 47 additions & 52 deletions llvm/test/CodeGen/Thumb2/LowOverheadLoops/disjoint-vcmp.mir
@@ -1,8 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
#
--- |
@mask = external global i16
; Function Attrs: nofree norecurse nounwind
define dso_local void @test(i32* noalias nocapture %arg, i32* noalias nocapture readonly %arg1, i32 %arg2, i32* noalias nocapture readonly %arg3) local_unnamed_addr #0 {
bb:
%tmp = icmp eq i32 %arg2, 0
Expand All @@ -16,8 +16,6 @@
%mask.load = load i16, i16* %mask.gep9
%conv.mask = zext i16 %mask.load to i32
%invariant.mask = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %conv.mask)
%mask.insert = insertelement <4 x i32> undef, i32 %conv.mask, i32 0
%invariant.limits = shufflevector <4 x i32> %mask.insert, <4 x i32> undef, <4 x i32> zeroinitializer
br i1 %tmp, label %bb27, label %bb3

bb3: ; preds = %bb
Expand All @@ -31,18 +29,20 @@
%lsr.iv = phi i32* [ %scevgep, %bb9 ], [ %arg, %bb3 ]
%tmp7 = phi i32 [ %tmp6, %bb3 ], [ %tmp12, %bb9 ]
%tmp8 = phi i32 [ %arg2, %bb3 ], [ %tmp11, %bb9 ]
%lsr.iv47 = bitcast i32* %lsr.iv4 to <4 x i32>*
%lsr.iv1 = bitcast i32* %lsr.iv to <4 x i32>*
%lsr.iv24 = bitcast i32* %lsr.iv2 to <4 x i32>*
%lsr.iv47 = bitcast i32* %lsr.iv4 to <4 x i32>*
%vctp = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp8)
%and = and <4 x i1> %vctp, %invariant.mask
%tmp11 = sub i32 %tmp8, 4
%tmp17 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv24, i32 4, <4 x i1> %and, <4 x i32> undef)
%tmp22 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1, i32 4, <4 x i1> %and, <4 x i32> undef)
%tmp23 = mul nsw <4 x i32> %tmp22, %tmp17
%scevgep8 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv47, i32 1
%load.limits = load <4 x i32>, <4 x i32>* %scevgep8
%bad.icmp = icmp ule <4 x i32> %load.limits, %invariant.limits
%scevgep2 = getelementptr <4 x i32>, <4 x i32>* %lsr.iv47, i32 1
%load.limits = load <4 x i32>, <4 x i32>* %scevgep2
%0 = insertelement <4 x i32> undef, i32 %conv.mask, i32 0
%1 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
%bad.icmp = icmp ule <4 x i32> %load.limits, %1
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp23, <4 x i32>* %lsr.iv1, i32 4, <4 x i1> %bad.icmp)
%tmp12 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp7, i32 1)
%tmp13 = icmp ne i32 %tmp12, 0
Expand All @@ -54,13 +54,12 @@
bb27: ; preds = %bb9, %bb
ret void
}
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #1
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #4
declare void @llvm.stackprotector(i8*, i8**) #5
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)

...
---
Expand All @@ -85,7 +84,7 @@ frameInfo:
hasStackMap: false
hasPatchPoint: false
stackSize: 20
offsetAdjustment: -12
offsetAdjustment: 0
maxAlignment: 4
adjustsStack: false
hasCalls: false
Expand All @@ -110,7 +109,7 @@ stack:
stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 3, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true,
stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 4, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
Expand All @@ -122,84 +121,81 @@ body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.bb:
; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $r6, $lr
; CHECK: frame-setup tPUSH 14, $noreg, killed $r4, killed $r6, killed $lr, implicit-def $sp, implicit $sp
; CHECK: liveins: $r3, $r2, $r0, $r1, $r4, $r5, $r7, $lr
; CHECK: frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -12
; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -16
; CHECK: $r7 = frame-setup tADDrSPi $sp, 2, 14, $noreg
; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
; CHECK: $sp = frame-setup tSUBspi $sp, 1, 14, $noreg
; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
; CHECK: tCBZ $r2, %bb.3
; CHECK: bb.1.bb3:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: liveins: $r3, $r2, $r0, $r1
; CHECK: $r12 = t2MOVi16 target-flags(arm-lo16) @mask, 14, $noreg
; CHECK: renamable $lr = t2ADDri renamable $r2, 3, 14, $noreg, $noreg
; CHECK: renamable $r4, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
; CHECK: $r12 = t2MOVTi16 killed $r12, target-flags(arm-hi16) @mask, 14, $noreg
; CHECK: renamable $lr = t2BICri killed renamable $lr, 3, 14, $noreg, $noreg
; CHECK: renamable $r12 = t2LDRHi12 killed renamable $r12, 0, 14, $noreg :: (dereferenceable load 2 from %ir.mask.gep9)
; CHECK: renamable $lr = t2SUBri killed renamable $lr, 4, 14, $noreg, $noreg
; CHECK: renamable $r4 = t2BICri killed renamable $r4, 3, 14, $noreg, $noreg
; CHECK: renamable $r5 = t2LDRHi12 killed renamable $r12, 0, 14, $noreg :: (dereferenceable load 2 from %ir.mask.gep9)
; CHECK: renamable $r12 = t2SUBri killed renamable $r4, 4, 14, $noreg, $noreg
; CHECK: renamable $r4, dead $cpsr = tMOVi8 1, 14, $noreg
; CHECK: $vpr = VMSR_P0 $r12, 14, $noreg
; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r12, 0, $noreg, undef renamable $q0
; CHECK: $vpr = VMSR_P0 $r5, 14, $noreg
; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r12, 19, 14, $noreg, $noreg
; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 16, 14, $noreg, $noreg
; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14, $noreg, $noreg
; CHECK: VSTR_P0_off killed renamable $vpr, $sp, 0, 14, $noreg :: (store 4 into %stack.0)
; CHECK: renamable $q0 = MVE_VDUP32 killed renamable $r5, 0, $noreg, undef renamable $q0
; CHECK: $r3 = tMOVr $r0, 14, $noreg
; CHECK: $lr = t2DLS renamable $lr
; CHECK: $lr = t2DLS killed renamable $lr
; CHECK: bb.2.bb9:
; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK: liveins: $lr, $q0, $r0, $r1, $r2, $r3, $r12
; CHECK: liveins: $lr, $r2, $r12, $r0, $q0, $r1, $r3
; CHECK: renamable $vpr = VLDR_P0_off $sp, 0, 14, $noreg :: (load 4 from %stack.0)
; CHECK: MVE_VPST 2, implicit $vpr
; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 1, killed renamable $vpr
; CHECK: renamable $r1, renamable $q1 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv24, align 4)
; CHECK: renamable $r3, renamable $q2 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, killed renamable $vpr :: (load 16 from %ir.lsr.iv1, align 4)
; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
; CHECK: renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
; CHECK: renamable $r12, renamable $q2 = MVE_VLDRWU32_pre killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.scevgep8, align 8)
; CHECK: renamable $vpr = MVE_VCMPu32 renamable $q0, killed renamable $q2, 2, 0, $noreg
; CHECK: MVE_VPST 8, implicit $vpr
; CHECK: renamable $r12, renamable $q2 = MVE_VLDRWU32_pre killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.scevgep2, align 8)
; CHECK: MVE_VPTv4u32 8, renamable $q0, killed renamable $q2, 2, implicit-def $vpr
; CHECK: MVE_VSTRWU32 killed renamable $q1, killed renamable $r0, 0, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv1, align 4)
; CHECK: $r0 = tMOVr $r3, 14, $noreg
; CHECK: $lr = t2LEUpdate renamable $lr, %bb.2
; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
; CHECK: bb.3.bb27:
; CHECK: $sp = tADDspi $sp, 1, 14, $noreg
; CHECK: tPOP_RET 14, $noreg, def $r4, def $r6, def $r7, def $pc
; CHECK: tPOP_RET 14, $noreg, def $r4, def $r5, def $r7, def $pc
bb.0.bb:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r6, $lr
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r7, $lr
frame-setup tPUSH 14, $noreg, killed $r4, killed $r6, killed $lr, implicit-def $sp, implicit $sp
frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr, implicit-def $sp, implicit $sp
frame-setup CFI_INSTRUCTION def_cfa_offset 16
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
frame-setup CFI_INSTRUCTION offset $r6, -12
frame-setup CFI_INSTRUCTION offset $r5, -12
frame-setup CFI_INSTRUCTION offset $r4, -16
$r7 = frame-setup tADDrSPi $sp, 2, 14, $noreg
frame-setup CFI_INSTRUCTION def_cfa $r7, 8
$sp = frame-setup tSUBspi $sp, 1, 14, $noreg
frame-setup CFI_INSTRUCTION def_cfa_offset 20
tCBZ $r2, %bb.3
bb.1.bb3:
successors: %bb.2(0x80000000)
liveins: $r0, $r1, $r2, $r3
$r12 = t2MOVi16 target-flags(arm-lo16) @mask, 14, $noreg
renamable $lr = t2ADDri renamable $r2, 3, 14, $noreg, $noreg
renamable $r4, dead $cpsr = tADDi3 renamable $r2, 3, 14, $noreg
$r12 = t2MOVTi16 killed $r12, target-flags(arm-hi16) @mask, 14, $noreg
renamable $lr = t2BICri killed renamable $lr, 3, 14, $noreg, $noreg
renamable $r12 = t2LDRHi12 killed renamable $r12, 0, 14, $noreg :: (dereferenceable load 2 from %ir.mask.gep9)
renamable $lr = t2SUBri killed renamable $lr, 4, 14, $noreg, $noreg
renamable $r4 = t2BICri killed renamable $r4, 3, 14, $noreg, $noreg
renamable $r5 = t2LDRHi12 killed renamable $r12, 0, 14, $noreg :: (dereferenceable load 2 from %ir.mask.gep9)
renamable $r12 = t2SUBri killed renamable $r4, 4, 14, $noreg, $noreg
renamable $r4, dead $cpsr = tMOVi8 1, 14, $noreg
$vpr = VMSR_P0 $r12, 14, $noreg
renamable $q0 = MVE_VDUP32 killed renamable $r12, 0, $noreg, undef renamable $q0
$vpr = VMSR_P0 $r5, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $r12, 19, 14, $noreg, $noreg
renamable $r12 = t2SUBri killed renamable $r3, 16, 14, $noreg, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $r4, killed renamable $lr, 19, 14, $noreg, $noreg
VSTR_P0_off killed renamable $vpr, $sp, 0, 14, $noreg :: (store 4 into %stack.0)
renamable $q0 = MVE_VDUP32 killed renamable $r5, 0, $noreg, undef renamable $q0
$r3 = tMOVr $r0, 14, $noreg
t2DoLoopStart renamable $lr
Expand All @@ -214,9 +210,8 @@ body: |
renamable $r3, renamable $q2 = MVE_VLDRWU32_post killed renamable $r3, 16, 1, killed renamable $vpr :: (load 16 from %ir.lsr.iv1, align 4)
renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14, $noreg
renamable $q1 = nsw MVE_VMULi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
renamable $r12, renamable $q2 = MVE_VLDRWU32_pre killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.scevgep8, align 8)
renamable $vpr = MVE_VCMPu32 renamable $q0, killed renamable $q2, 2, 0, $noreg
MVE_VPST 8, implicit $vpr
renamable $r12, renamable $q2 = MVE_VLDRWU32_pre killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.scevgep2, align 8)
MVE_VPTv4u32 8, renamable $q0, killed renamable $q2, 2, implicit-def $vpr
MVE_VSTRWU32 killed renamable $q1, killed renamable $r0, 0, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv1, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
$r0 = tMOVr $r3, 14, $noreg
Expand All @@ -225,6 +220,6 @@ body: |
bb.3.bb27:
$sp = tADDspi $sp, 1, 14, $noreg
tPOP_RET 14, $noreg, def $r4, def $r6, def $r7, def $pc
tPOP_RET 14, $noreg, def $r4, def $r5, def $r7, def $pc
...
Expand Up @@ -6,7 +6,7 @@
# CHECK: bb.1.vector.body:
# CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
# CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
# CHECK: $lr = MVE_LETP renamable $lr, %bb.1
# CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1

--- |
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
Expand Down
Expand Up @@ -7,8 +7,8 @@
# We shouldn't optimise away the SUB.

# CHECK: bb.1.vector.body:
# CHECK: renamable $r3, $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
# CHECK: $lr = MVE_LETP renamable $lr, %bb.1
# CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
# CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1

--- |
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
Expand Down
Expand Up @@ -8,7 +8,7 @@

# CHECK: bb.1.vector.body:
# CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
# CHECK: $lr = MVE_LETP renamable $lr, %bb.1
# CHECK: $lr = MVE_LETP killed renamable $lr, %bb.1

--- |
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
Expand Down

0 comments on commit e93e0d4

Please sign in to comment.