Skip to content

Commit

Permalink
[DPWBS-1041] Infer alignment of objects passed through the stack during call lowering.
Browse files Browse the repository at this point in the history

The function inferAlignmentFromPtrInfo can infer the alignment for fixed stack objects,
however for outgoing arguments which are passed through the "normal" stack, it currently
fails. Since that function has no access to the TargetFrameLowering, we infer the
alignment ourselves.
  • Loading branch information
konstantinschwarz committed Mar 23, 2020
1 parent db47148 commit 51ac589
Show file tree
Hide file tree
Showing 3 changed files with 47 additions and 42 deletions.
11 changes: 8 additions & 3 deletions llvm/lib/Target/TriCore/TriCoreCallLowering.cpp
Expand Up @@ -67,10 +67,11 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {

/// Load an incoming argument from its stack slot at \p Addr into \p ValVReg.
///
/// The load is marked invariant because incoming argument slots are not
/// modified within the function body. The alignment is inferred from the
/// fixed stack object backing \p MPO, so that later passes can select
/// naturally-aligned load instructions instead of conservatively assuming
/// byte alignment.
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                          MachinePointerInfo &MPO, CCValAssign &VA) override {
  MachineFunction &MF = MIRBuilder.getMF();
  // Incoming arguments live in fixed stack objects, so the generic helper
  // can derive the alignment from the frame index in MPO.
  unsigned Align = inferAlignmentFromPtrInfo(MF, MPO);
  auto MMO = MF.getMachineMemOperand(
      MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
      Align);
  MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
}

Expand Down Expand Up @@ -147,8 +148,12 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {

/// Store an outgoing argument value \p ValVReg to its stack slot at \p Addr.
///
/// Outgoing arguments are passed through the "normal" stack rather than
/// fixed stack objects, so inferAlignmentFromPtrInfo cannot determine their
/// alignment. Instead, derive it from the target's stack alignment and the
/// slot offset: a slot at offset 0 is as aligned as the stack pointer, and
/// MinAlign accounts for the additional offset from it.
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                          MachinePointerInfo &MPO, CCValAssign &VA) override {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  // Largest power-of-two alignment guaranteed by both the stack alignment
  // and the slot's offset from the stack pointer.
  unsigned Align = MinAlign(TFL->getStackAlignment(), MPO.Offset);

  auto MMO =
      MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);
  MIRBuilder.buildStore(ValVReg, Addr, *MMO);
}

Expand Down
48 changes: 24 additions & 24 deletions llvm/test/CodeGen/TriCore/GlobalIsel/call-translator.ll
Expand Up @@ -213,19 +213,19 @@ define void @test_call_stack(i64 %e4, i64 %e6) {
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C3]](s32)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 8)
; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
; CHECK: G_STORE [[C1]](s64), [[PTR_ADD1]](p0) :: (store 8 into stack + 4, align 1)
; CHECK: G_STORE [[C1]](s64), [[PTR_ADD1]](p0) :: (store 8 into stack + 4, align 4)
; CHECK: $a4 = COPY [[INTTOPTR]](p0)
; CHECK: $a5 = COPY [[INTTOPTR]](p0)
; CHECK: $a6 = COPY [[INTTOPTR]](p0)
; CHECK: $a7 = COPY [[INTTOPTR]](p0)
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C5]](s32)
; CHECK: G_STORE [[INTTOPTR]](p0), [[PTR_ADD2]](p0) :: (store 4 into stack + 12, align 1)
; CHECK: G_STORE [[INTTOPTR]](p0), [[PTR_ADD2]](p0) :: (store 4 into stack + 12)
; CHECK: CALL @test_stack_slots, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $e4, implicit $e6, implicit $a4, implicit $a5, implicit $a6, implicit $a7
; CHECK: ADJCALLSTACKUP 16, 0, implicit-def $a10, implicit $a10
; CHECK: RET implicit $a11
Expand All @@ -249,11 +249,11 @@ define void @test_call_stack2([2 x i32] %str) {
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
; CHECK: G_STORE [[COPY]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
; CHECK: G_STORE [[COPY]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 8)
; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C1]](s32)
; CHECK: G_STORE [[COPY1]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack + 4, align 1)
; CHECK: G_STORE [[COPY1]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack + 4)
; CHECK: CALL @take_struct_stack, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $d4, implicit $d5, implicit $d6, implicit $d7
; CHECK: ADJCALLSTACKUP 8, 0, implicit-def $a10, implicit $a10
; CHECK: RET implicit $a11
Expand All @@ -278,11 +278,11 @@ define void @test_call_stack3(i64 %e4, i64 %e6) {
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store 1 into stack, align 8)
; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 4, align 1)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 4, align 4)
; CHECK: CALL @test_stack_slots_ext, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $e4, implicit $e6
; CHECK: ADJCALLSTACKUP 8, 0, implicit-def $a10, implicit $a10
; CHECK: ADJCALLSTACKDOWN 8, 0, implicit-def $a10, implicit $a10
Expand All @@ -291,11 +291,11 @@ define void @test_call_stack3(i64 %e4, i64 %e6) {
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C4]](s32)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD2]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD2]](p0) :: (store 1 into stack, align 8)
; CHECK: [[COPY5:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C5]](s32)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD3]](p0) :: (store 2 into stack + 4, align 1)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD3]](p0) :: (store 2 into stack + 4, align 4)
; CHECK: CALL @test_stack_slots_ext, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $e4, implicit $e6
; CHECK: ADJCALLSTACKUP 8, 0, implicit-def $a10, implicit $a10
; CHECK: ADJCALLSTACKDOWN 8, 0, implicit-def $a10, implicit $a10
Expand All @@ -304,11 +304,11 @@ define void @test_call_stack3(i64 %e4, i64 %e6) {
; CHECK: [[COPY6:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY6]], [[C6]](s32)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD4]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD4]](p0) :: (store 1 into stack, align 8)
; CHECK: [[COPY7:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C7]](s32)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD5]](p0) :: (store 2 into stack + 4, align 1)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD5]](p0) :: (store 2 into stack + 4, align 4)
; CHECK: CALL @test_stack_slots_ext, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $e4, implicit $e6
; CHECK: ADJCALLSTACKUP 8, 0, implicit-def $a10, implicit $a10
; CHECK: RET implicit $a11
Expand Down Expand Up @@ -340,31 +340,31 @@ define void @test_call_stack_mixed(i64 %e4, i64 %e6) {
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C6]](s32)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 8)
; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C7]](s32)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 4, align 1)
; CHECK: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 4, align 4)
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C8]](s32)
; CHECK: G_STORE [[C2]](s32), [[PTR_ADD2]](p0) :: (store 4 into stack + 8, align 1)
; CHECK: G_STORE [[C2]](s32), [[PTR_ADD2]](p0) :: (store 4 into stack + 8, align 8)
; CHECK: [[COPY5:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C9]](s32)
; CHECK: G_STORE [[C3]](s64), [[PTR_ADD3]](p0) :: (store 8 into stack + 12, align 1)
; CHECK: G_STORE [[C3]](s64), [[PTR_ADD3]](p0) :: (store 8 into stack + 12, align 4)
; CHECK: [[COPY6:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY6]], [[C10]](s32)
; CHECK: G_STORE [[C4]](s64), [[PTR_ADD4]](p0) :: (store 8 into stack + 20, align 1)
; CHECK: G_STORE [[C4]](s64), [[PTR_ADD4]](p0) :: (store 8 into stack + 20, align 4)
; CHECK: $a4 = COPY [[INTTOPTR]](p0)
; CHECK: $a5 = COPY [[INTTOPTR]](p0)
; CHECK: $a6 = COPY [[INTTOPTR]](p0)
; CHECK: $a7 = COPY [[INTTOPTR]](p0)
; CHECK: [[COPY7:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C11]](s32)
; CHECK: G_STORE [[INTTOPTR]](p0), [[PTR_ADD5]](p0) :: (store 4 into stack + 28, align 1)
; CHECK: G_STORE [[INTTOPTR]](p0), [[PTR_ADD5]](p0) :: (store 4 into stack + 28)
; CHECK: CALL @test_stack_slots_mixed, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $e4, implicit $e6, implicit $a4, implicit $a5, implicit $a6, implicit $a7
; CHECK: ADJCALLSTACKUP 32, 0, implicit-def $a10, implicit $a10
; CHECK: RET implicit $a11
Expand Down Expand Up @@ -538,31 +538,31 @@ define void @test_varargs() {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s32)
; CHECK: G_STORE [[C2]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C2]](s8), [[PTR_ADD]](p0) :: (store 1 into stack, align 8)
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C10]](s32)
; CHECK: G_STORE [[C3]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 4, align 1)
; CHECK: G_STORE [[C3]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 4, align 4)
; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C11]](s32)
; CHECK: G_STORE [[C4]](s32), [[PTR_ADD2]](p0) :: (store 4 into stack + 8, align 1)
; CHECK: G_STORE [[C4]](s32), [[PTR_ADD2]](p0) :: (store 4 into stack + 8, align 8)
; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C12]](s32)
; CHECK: G_STORE [[C5]](s64), [[PTR_ADD3]](p0) :: (store 8 into stack + 12, align 1)
; CHECK: G_STORE [[C5]](s64), [[PTR_ADD3]](p0) :: (store 8 into stack + 12, align 4)
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C13]](s32)
; CHECK: G_STORE [[C6]](s16), [[PTR_ADD4]](p0) :: (store 2 into stack + 20, align 1)
; CHECK: G_STORE [[C6]](s16), [[PTR_ADD4]](p0) :: (store 2 into stack + 20, align 4)
; CHECK: [[COPY5:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C14]](s32)
; CHECK: G_STORE [[C7]](s32), [[PTR_ADD5]](p0) :: (store 4 into stack + 24, align 1)
; CHECK: G_STORE [[C7]](s32), [[PTR_ADD5]](p0) :: (store 4 into stack + 24, align 8)
; CHECK: [[COPY6:%[0-9]+]]:_(p0) = COPY $a10
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY6]], [[C15]](s32)
; CHECK: G_STORE [[C8]](s64), [[PTR_ADD6]](p0) :: (store 8 into stack + 28, align 1)
; CHECK: G_STORE [[C8]](s64), [[PTR_ADD6]](p0) :: (store 8 into stack + 28, align 4)
; CHECK: CALL @varargs, csr_tricore_uppercontext, implicit-def $a11, implicit $psw, implicit $e4, implicit $d6
; CHECK: ADJCALLSTACKUP 36, 0, implicit-def $a10, implicit $a10
; CHECK: RET implicit $a11
Expand Down
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/TriCore/GlobalIsel/callingconv.ll
Expand Up @@ -284,9 +284,9 @@ define i32 @args_stack1(i32 %d4, i32 %d5, i32 %d6, i32 %d7, i32 %stack1, i64 %st
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $d6
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $d7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 4)
; CHECK: $d2 = COPY [[LOAD]](s32)
; CHECK: RET implicit $a11, implicit $d2
entry:
Expand All @@ -302,11 +302,11 @@ define i32 @args_stack2(i32 %d4, i32 %d5, i32 %d6, i32 %d7, [2 x i32] %stack1, i
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $d6
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $d7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.2, align 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.2, align 8)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.0, align 8)
; CHECK: $d2 = COPY [[LOAD]](s32)
; CHECK: RET implicit $a11, implicit $d2
entry:
Expand All @@ -322,9 +322,9 @@ define i32 @args_stack3(i32 %d4, i32 %d5, i32 %d6, [2 x i32] %stack1, i32 %d7) {
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $d5
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $d6
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $d7
; CHECK: $d2 = COPY [[LOAD]](s32)
; CHECK: RET implicit $a11, implicit $d2
Expand All @@ -346,9 +346,9 @@ define i32* @args_stack4(i32* %a4, i32 %d4, i32* %a5, i32 %d5, i32* %a6, i32 %d6
; CHECK: [[COPY6:%[0-9]+]]:_(p0) = COPY $a7
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $d7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
; CHECK: $a2 = COPY [[LOAD]](p0)
; CHECK: RET implicit $a11, implicit $a2
entry:
Expand All @@ -364,11 +364,11 @@ define zeroext i8 @args_stack_mixed(i32 %d4, i32 %d5, i32 %d6, i32 %d7, i1 zeroe
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $d6
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $d7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.2)
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.2, align 8)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 2 from %fixed-stack.0, align 1)
; CHECK: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 2 from %fixed-stack.0, align 8)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; CHECK: $d2 = COPY [[ZEXT]](s32)
; CHECK: RET implicit $a11, implicit $d2
Expand All @@ -385,11 +385,11 @@ define half @args_stack_floats_mixed(half %d4, float %d5, double %e6, half %stac
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $d5
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $e6
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.2, align 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.2, align 8)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 8 from %fixed-stack.0, align 1)
; CHECK: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 8 from %fixed-stack.0)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK: $d2 = COPY [[ANYEXT]](s32)
; CHECK: RET implicit $a11, implicit $d2
Expand Down

0 comments on commit 51ac589

Please sign in to comment.