Navigation Menu

Skip to content

Commit

Permalink
[TargetLowering] Only inspect attributes in the arguments for ArgListEntry
Browse files Browse the repository at this point in the history

Parameter attributes are considered part of the function [1], and, as with
mismatched calling conventions [2], we can't have the verifier check for
mismatched parameter attributes between a call site and its callee.

This is a reland after fixing MSan issues in D102667.

[1] https://llvm.org/docs/LangRef.html#parameter-attributes
[2] https://llvm.org/docs/FAQ.html#why-does-instcombine-simplifycfg-turn-a-call-to-a-function-with-a-mismatched-calling-convention-into-unreachable-why-not-make-the-verifier-reject-it

Reviewed By: rnk

Differential Revision: https://reviews.llvm.org/D101806
  • Loading branch information
aeubanks committed May 18, 2021
1 parent b86302e commit 1c7f323
Show file tree
Hide file tree
Showing 23 changed files with 185 additions and 129 deletions.
8 changes: 8 additions & 0 deletions llvm/docs/ReleaseNotes.rst
Expand Up @@ -64,6 +64,7 @@ Changes to the LLVM IR
* The opaque pointer type ``ptr`` has been introduced. It is still in the
process of being worked on and should not be used yet.

=======
Changes to building LLVM
------------------------

Expand All @@ -74,6 +75,13 @@ Changes to building LLVM
Changes to TableGen
-------------------

Changes to Backend Code Generation
----------------------------------

* When lowering calls, only ABI attributes on the call itself are checked, not
  those on the called function. Frontends need to make sure to properly set
  ABI attributes on calls (and always should have).

Changes to the ARM Backend
--------------------------

Expand Down
33 changes: 18 additions & 15 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
Expand Up @@ -102,29 +102,32 @@ bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
/// Set CallLoweringInfo attribute flags based on the call instruction's
/// argument attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
unsigned ArgIdx) {
IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
Alignment = Call->getParamStackAlign(ArgIdx);
auto Attrs = Call->getAttributes();

IsSExt = Attrs.hasParamAttribute(ArgIdx, Attribute::SExt);
IsZExt = Attrs.hasParamAttribute(ArgIdx, Attribute::ZExt);
IsInReg = Attrs.hasParamAttribute(ArgIdx, Attribute::InReg);
IsSRet = Attrs.hasParamAttribute(ArgIdx, Attribute::StructRet);
IsNest = Attrs.hasParamAttribute(ArgIdx, Attribute::Nest);
IsReturned = Attrs.hasParamAttribute(ArgIdx, Attribute::Returned);
IsSwiftSelf = Attrs.hasParamAttribute(ArgIdx, Attribute::SwiftSelf);
IsSwiftAsync = Attrs.hasParamAttribute(ArgIdx, Attribute::SwiftAsync);
IsSwiftError = Attrs.hasParamAttribute(ArgIdx, Attribute::SwiftError);
Alignment = Attrs.getParamStackAlignment(ArgIdx);

IsByVal = Attrs.hasParamAttribute(ArgIdx, Attribute::ByVal);
ByValType = nullptr;
if (IsByVal) {
ByValType = Call->getParamByValType(ArgIdx);
if (!Alignment)
Alignment = Call->getParamAlign(ArgIdx);
}
IsInAlloca = Attrs.hasParamAttribute(ArgIdx, Attribute::InAlloca);
IsPreallocated = Attrs.hasParamAttribute(ArgIdx, Attribute::Preallocated);
PreallocatedType = nullptr;
if (IsPreallocated)
PreallocatedType = Call->getParamPreallocatedType(ArgIdx);
Expand Down
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AArch64/arm64-this-return.ll
Expand Up @@ -38,9 +38,9 @@ entry:
; CHECK-NOT: mov x0, {{x[0-9]+}}
; CHECK: b {{_?B_ctor_base}}
%0 = bitcast %struct.C* %this to %struct.A*
%call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
%call = tail call %struct.A* @A_ctor_base(%struct.A* returned %0)
%1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
%call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
%call2 = tail call %struct.B* @B_ctor_base(%struct.B* returned %1, i32 %x)
ret %struct.C* %this
}

Expand Down Expand Up @@ -88,7 +88,7 @@ define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
entry:
; CHECK-LABEL: C_ctor_complete:
; CHECK: b {{_?C_ctor_base}}
%call = tail call %struct.C* @C_ctor_base(%struct.C* %this, i32 %x)
%call = tail call %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x)
ret %struct.C* %this
}

Expand Down Expand Up @@ -135,8 +135,8 @@ entry:
; CHECK-NOT: mov x0, {{x[0-9]+}}
; CHECK: b {{_?B_ctor_complete}}
%b = getelementptr inbounds %struct.D, %struct.D* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
%call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
ret %struct.D* %this
}

Expand Down Expand Up @@ -166,8 +166,8 @@ entry:
; CHECK-LABEL: E_ctor_base:
; CHECK-NOT: b {{_?B_ctor_complete}}
%b = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
%call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
%b2 = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 1
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b2, i32 %x)
ret %struct.E* %this
}
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/bitfield-extract.ll
Expand Up @@ -91,7 +91,7 @@ define signext i16 @test10(i64 %a) {
define void @test11(i64 %a) {
%tmp = lshr i64 %a, 23
%res = trunc i64 %tmp to i16
call void @use(i16 %res, i64 %tmp)
call void @use(i16 signext %res, i64 %tmp)
ret void
}

Expand Down
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
Expand Up @@ -11,7 +11,7 @@ declare void @test_explicit_sret(i1024* sret(i1024)) #0
; CHECK-LABEL: _test_tailcall_explicit_sret:
; CHECK-NEXT: b _test_explicit_sret
define void @test_tailcall_explicit_sret(i1024* sret(i1024) %arg) #0 {
tail call void @test_explicit_sret(i1024* %arg)
tail call void @test_explicit_sret(i1024* sret(i1024) %arg)
ret void
}

Expand All @@ -20,7 +20,7 @@ define void @test_tailcall_explicit_sret(i1024* sret(i1024) %arg) #0 {
; CHECK: bl _test_explicit_sret
; CHECK: ret
define void @test_call_explicit_sret(i1024* sret(i1024) %arg) #0 {
call void @test_explicit_sret(i1024* %arg)
call void @test_explicit_sret(i1024* sret(i1024) %arg)
ret void
}

Expand All @@ -30,7 +30,7 @@ define void @test_call_explicit_sret(i1024* sret(i1024) %arg) #0 {
; CHECK: ret
define void @test_tailcall_explicit_sret_alloca_unused() #0 {
%l = alloca i1024, align 8
tail call void @test_explicit_sret(i1024* %l)
tail call void @test_explicit_sret(i1024* sret(i1024) %l)
ret void
}

Expand All @@ -44,7 +44,7 @@ define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
%l = alloca i1024, align 8
%r = load i1024, i1024* %ptr, align 8
store i1024 %r, i1024* %l, align 8
tail call void @test_explicit_sret(i1024* %l)
tail call void @test_explicit_sret(i1024* sret(i1024) %l)
ret void
}

Expand All @@ -56,7 +56,7 @@ define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
; CHECK: ret
define void @test_tailcall_explicit_sret_gep(i1024* %ptr) #0 {
%ptr2 = getelementptr i1024, i1024* %ptr, i32 1
tail call void @test_explicit_sret(i1024* %ptr2)
tail call void @test_explicit_sret(i1024* sret(i1024) %ptr2)
ret void
}

Expand All @@ -69,7 +69,7 @@ define void @test_tailcall_explicit_sret_gep(i1024* %ptr) #0 {
; CHECK: ret
define i1024 @test_tailcall_explicit_sret_alloca_returned() #0 {
%l = alloca i1024, align 8
tail call void @test_explicit_sret(i1024* %l)
tail call void @test_explicit_sret(i1024* sret(i1024) %l)
%r = load i1024, i1024* %l, align 8
ret i1024 %r
}
Expand Down
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AMDGPU/call-argument-types.ll
Expand Up @@ -92,7 +92,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_imm() #0 {
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
%var = load volatile i1, i1 addrspace(1)* undef
call void @external_void_func_i1_signext(i1 %var)
call void @external_void_func_i1_signext(i1 signext %var)
ret void
}

Expand All @@ -113,7 +113,7 @@ define amdgpu_kernel void @test_call_external_void_func_i1_signext(i32) #0 {
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i1_zeroext(i32) #0 {
%var = load volatile i1, i1 addrspace(1)* undef
call void @external_void_func_i1_zeroext(i1 %var)
call void @external_void_func_i1_zeroext(i1 zeroext %var)
ret void
}

Expand Down Expand Up @@ -148,7 +148,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_imm(i32) #0 {
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
%var = load volatile i8, i8 addrspace(1)* undef
call void @external_void_func_i8_signext(i8 %var)
call void @external_void_func_i8_signext(i8 signext %var)
ret void
}

Expand All @@ -166,7 +166,7 @@ define amdgpu_kernel void @test_call_external_void_func_i8_signext(i32) #0 {
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i8_zeroext(i32) #0 {
%var = load volatile i8, i8 addrspace(1)* undef
call void @external_void_func_i8_zeroext(i8 %var)
call void @external_void_func_i8_zeroext(i8 zeroext %var)
ret void
}

Expand Down Expand Up @@ -195,7 +195,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_imm() #0 {
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
%var = load volatile i16, i16 addrspace(1)* undef
call void @external_void_func_i16_signext(i16 %var)
call void @external_void_func_i16_signext(i16 signext %var)
ret void
}

Expand All @@ -212,7 +212,7 @@ define amdgpu_kernel void @test_call_external_void_func_i16_signext(i32) #0 {
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @test_call_external_void_func_i16_zeroext(i32) #0 {
%var = load volatile i16, i16 addrspace(1)* undef
call void @external_void_func_i16_zeroext(i16 %var)
call void @external_void_func_i16_zeroext(i16 zeroext %var)
ret void
}

Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll
Expand Up @@ -517,7 +517,7 @@ define amdgpu_kernel void @kern_call_too_many_args_use_workitem_id_x_byval() #1
i32 210, i32 220, i32 230, i32 240,
i32 250, i32 260, i32 270, i32 280,
i32 290, i32 300, i32 310, i32 320,
i32 addrspace(5)* %alloca)
i32 addrspace(5)* byval(i32) %alloca)
ret void
}

Expand All @@ -541,7 +541,7 @@ define void @func_call_too_many_args_use_workitem_id_x_byval() #1 {
i32 210, i32 220, i32 230, i32 240,
i32 250, i32 260, i32 270, i32 280,
i32 290, i32 300, i32 310, i32 320,
i32 addrspace(5)* %alloca)
i32 addrspace(5)* byval(i32) %alloca)
ret void
}

Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll
Expand Up @@ -649,7 +649,7 @@ define amdgpu_kernel void @kern_call_too_many_args_use_workitem_id_x_byval() #1
i32 210, i32 220, i32 230, i32 240,
i32 250, i32 260, i32 270, i32 280,
i32 290, i32 300, i32 310, i32 320,
i32 addrspace(5)* %alloca)
i32 addrspace(5)* byval(i32) %alloca)
ret void
}

Expand Down Expand Up @@ -686,7 +686,7 @@ define void @func_call_too_many_args_use_workitem_id_x_byval() #1 {
i32 210, i32 220, i32 230, i32 240,
i32 250, i32 260, i32 270, i32 280,
i32 290, i32 300, i32 310, i32 320,
i32 addrspace(5)* %alloca)
i32 addrspace(5)* byval(i32) %alloca)
ret void
}

Expand Down

0 comments on commit 1c7f323

Please sign in to comment.