diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 6301285fe9545..cff071c4f24b3 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1840,7 +1840,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
       // just bail. Also check that its register class is one of the ones we
       // can harden.
       Register UseDefReg = UseMI.getOperand(0).getReg();
-      if (!UseDefReg.isVirtual() || !canHardenRegister(UseDefReg))
+      if (!canHardenRegister(UseDefReg))
        return {};
 
       SingleUseMI = &UseMI;
@@ -1863,6 +1863,10 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
 }
 
 bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
+  // We only support hardening virtual registers.
+  if (!Reg.isVirtual())
+    return false;
+
   auto *RC = MRI->getRegClass(Reg);
   int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
   if (RegBytes > 8)
@@ -1909,7 +1913,6 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
     Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     const DebugLoc &Loc) {
   assert(canHardenRegister(Reg) && "Cannot harden this register!");
-  assert(Reg.isVirtual() && "Cannot harden a physical register!");
 
   auto *RC = MRI->getRegClass(Reg);
   int Bytes = TRI->getRegSizeInBits(*RC) / 8;
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 0c47fcddc43af..45da777ea4e2a 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1161,3 +1161,34 @@ define void @idempotent_atomic(ptr %x) speculative_load_hardening {
   %tmp = atomicrmw or ptr %x, i32 0 seq_cst
   ret void
 }
+
+; Make sure we don't crash on longjmps (PR60081).
+declare void @llvm.eh.sjlj.longjmp(ptr)
+define void @test_longjmp(ptr %env) speculative_load_hardening {
+; X64-LABEL: test_longjmp:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:    .cfi_offset %rbp, -16
+; X64-NEXT:    movq %rsp, %rax
+; X64-NEXT:    movq $-1, %rcx
+; X64-NEXT:    sarq $63, %rax
+; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    movq (%rdi), %rbp
+; X64-NEXT:    movq 8(%rdi), %rcx
+; X64-NEXT:    movq 16(%rdi), %rsp
+; X64-NEXT:    orq %rax, %rcx
+; X64-NEXT:    jmpq *%rcx
+;
+; X64-LFENCE-LABEL: test_longjmp:
+; X64-LFENCE:       # %bb.0:
+; X64-LFENCE-NEXT:    pushq %rbp
+; X64-LFENCE-NEXT:    .cfi_def_cfa_offset 16
+; X64-LFENCE-NEXT:    .cfi_offset %rbp, -16
+; X64-LFENCE-NEXT:    movq (%rdi), %rbp
+; X64-LFENCE-NEXT:    movq 8(%rdi), %rax
+; X64-LFENCE-NEXT:    movq 16(%rdi), %rsp
+; X64-LFENCE-NEXT:    jmpq *%rax
+  call void @llvm.eh.sjlj.longjmp(ptr %env)
+  unreachable
+}
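
For context (a reading of the diff above, not part of the commit): the patch folds the virtual-register precondition into canHardenRegister itself, so callers that merely query hardenability (such as sinkPostLoadHardenedInst) bail out gracefully on physical-register defs, like the ones produced by the sjlj longjmp lowering, instead of tripping the assert in hardenValueInRegister. Below is a minimal standalone sketch of that pattern; the Register type here is a hypothetical stand-in for illustration, not LLVM's llvm::Register.

```cpp
// Standalone sketch: fold a precondition into the capability predicate so
// every caller inherits it, rather than asserting at individual call sites.
// `Register` is a hypothetical stand-in, not LLVM's class.
#include <cassert>
#include <cstdint>

struct Register {
  uint32_t Id = 0;
  // Mirrors the spirit of llvm::Register::isVirtual(): virtual registers
  // occupy a distinct ID range from physical ones.
  bool isVirtual() const { return Id >= (1u << 31); }
};

bool canHardenRegister(Register Reg) {
  // Centralized check (the patch's addition): physical registers are
  // simply "not hardenable", so query-only callers skip them cleanly.
  if (!Reg.isVirtual())
    return false;
  return true; // ... size and register-class checks would follow here.
}

void hardenValueInRegister(Register Reg) {
  // This assert now subsumes the old physical-register assert, which
  // became redundant and was removed in the patch.
  assert(canHardenRegister(Reg) && "Cannot harden this register!");
  // ... emit hardening instructions ...
}

int main() {
  Register Phys{42};              // physical: low ID range
  Register Virt{(1u << 31) + 7};  // virtual: high ID range
  // A sinking-style caller can query and skip, with no crash:
  if (canHardenRegister(Phys))
    hardenValueInRegister(Phys);  // not reached for physical registers
  if (canHardenRegister(Virt))
    hardenValueInRegister(Virt);
  return 0;
}
```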