diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 18bf32fe1acaa..4cca291a24562 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -6087,6 +6087,10 @@ bool X86TTIImpl::areInlineCompatible(const Function *Caller,
 
   for (const Instruction &I : instructions(Callee)) {
     if (const auto *CB = dyn_cast<CallBase>(&I)) {
+      // Having more target features is fine for inline ASM.
+      if (CB->isInlineAsm())
+        continue;
+
       SmallVector<Type *, 8> Types;
       for (Value *Arg : CB->args())
        Types.push_back(Arg->getType());
diff --git a/llvm/test/Transforms/Inline/X86/call-abi-compatibility.ll b/llvm/test/Transforms/Inline/X86/call-abi-compatibility.ll
index f03270bafea99..6f582cab2f145 100644
--- a/llvm/test/Transforms/Inline/X86/call-abi-compatibility.ll
+++ b/llvm/test/Transforms/Inline/X86/call-abi-compatibility.ll
@@ -94,27 +94,22 @@ define internal void @caller_not_avx4() {
 declare i64 @caller_unknown_simple(i64)
 
-; FIXME: This call should get inlined, because the callee only contains
+; This call should get inlined, because the callee only contains
 ; inline ASM, not real calls.
 define <8 x i64> @caller_inline_asm(ptr %p0, i64 %k, ptr %p1, ptr %p2) #0 {
 ; CHECK-LABEL: define {{[^@]+}}@caller_inline_asm
 ; CHECK-SAME: (ptr [[P0:%.*]], i64 [[K:%.*]], ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR2:[0-9]+]] {
-; CHECK-NEXT:    [[CALL:%.*]] = call <8 x i64> @callee_inline_asm(ptr [[P0]], i64 [[K]], ptr [[P1]], ptr [[P2]])
-; CHECK-NEXT:    ret <8 x i64> [[CALL]]
+; CHECK-NEXT:    [[SRC_I:%.*]] = load <8 x i64>, ptr [[P0]], align 64
+; CHECK-NEXT:    [[A_I:%.*]] = load <8 x i64>, ptr [[P1]], align 64
+; CHECK-NEXT:    [[B_I:%.*]] = load <8 x i64>, ptr [[P2]], align 64
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i64> asm "vpaddb\09$($3, $2, $0 {$1}", "=v,^Yk,v,v,0,~{dirflag},~{fpsr},~{flags}"(i64 [[K]], <8 x i64> [[A_I]], <8 x i64> [[B_I]], <8 x i64> [[SRC_I]])
+; CHECK-NEXT:    ret <8 x i64> [[TMP1]]
 ;
   %call = call <8 x i64> @callee_inline_asm(ptr %p0, i64 %k, ptr %p1, ptr %p2)
   ret <8 x i64> %call
 }
 
 define internal <8 x i64> @callee_inline_asm(ptr %p0, i64 %k, ptr %p1, ptr %p2) #1 {
-; CHECK-LABEL: define {{[^@]+}}@callee_inline_asm
-; CHECK-SAME: (ptr [[P0:%.*]], i64 [[K:%.*]], ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR3:[0-9]+]] {
-; CHECK-NEXT:    [[SRC:%.*]] = load <8 x i64>, ptr [[P0]], align 64
-; CHECK-NEXT:    [[A:%.*]] = load <8 x i64>, ptr [[P1]], align 64
-; CHECK-NEXT:    [[B:%.*]] = load <8 x i64>, ptr [[P2]], align 64
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <8 x i64> asm "vpaddb\09$($3, $2, $0 {$1}", "=v,^Yk,v,v,0,~{dirflag},~{fpsr},~{flags}"(i64 [[K]], <8 x i64> [[A]], <8 x i64> [[B]], <8 x i64> [[SRC]])
-; CHECK-NEXT:    ret <8 x i64> [[TMP1]]
-;
   %src = load <8 x i64>, ptr %p0, align 64
   %a = load <8 x i64>, ptr %p1, align 64
   %b = load <8 x i64>, ptr %p2, align 64