diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 58ebe023cd61e..7ce0aa22b9979 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -959,8 +959,10 @@ void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
   SmallString<256> Code;
   unsigned MinSize = MI.getOperand(0).getImm();
 
-  if (NextMI != MI.getParent()->end()) {
+  if (NextMI != MI.getParent()->end() && !NextMI->isInlineAsm()) {
     // Lower the next MachineInstr to find its byte size.
+    // If the next instruction is inline assembly, we skip lowering it for now,
+    // and assume we should always generate NOPs.
     MCInst MCI;
     MCIL.Lower(&*NextMI, MCI);
 
diff --git a/llvm/test/CodeGen/X86/patchable-prologue.ll b/llvm/test/CodeGen/X86/patchable-prologue.ll
index 71a392845fdea..43761e3d1e1eb 100644
--- a/llvm/test/CodeGen/X86/patchable-prologue.ll
+++ b/llvm/test/CodeGen/X86/patchable-prologue.ll
@@ -193,3 +193,20 @@ do.body:                                          ; preds = %do.body, %entry
 do.end:                                           ; preds = %do.body
   ret void
 }
+
+
+; Test that inline asm is properly hotpatched. We currently don't examine the
+; asm instruction when printing it, so we always emit patching NOPs.
+
+; 64: inline_asm:
+; 64-NEXT: # %bb.0:
+; 64-NEXT: xchgw %ax, %ax # encoding: [0x66,0x90]
+; 64-NEXT: #APP
+; 64-NEXT: int3 # encoding: [0xcc]
+; 64-NEXT: #NO_APP
+
+define dso_local void @inline_asm() "patchable-function"="prologue-short-redirect" {
+entry:
+  call void asm sideeffect "int3", "~{dirflag},~{fpsr},~{flags}"()
+  ret void
+}
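
A quick way to observe the new behavior by hand (a hypothetical invocation;
the test's actual RUN lines may differ, though -mtriple and -show-mc-encoding
are standard llc flags, and the "# encoding:" CHECK lines imply the latter):

  llc -mtriple=x86_64-pc-windows-msvc -show-mc-encoding \
      llvm/test/CodeGen/X86/patchable-prologue.ll -o -

With "patchable-function"="prologue-short-redirect", LowerPATCHABLE_OP
normally lowers the next MachineInstr and encodes it to check whether it is
already at least MinSize bytes long. An INLINEASM MachineInstr has no single
MCInst form to measure that way, so after this change we skip the measurement
and unconditionally pad the entry with the two-byte NOP (xchgw %ax, %ax) that
the CHECK lines verify.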