Skip to content

Commit

Permalink
[X86] Add RET-hardening Support to mitigate Load Value Injection (LVI)
Browse files Browse the repository at this point in the history
Adding a pass that replaces every ret instruction with the sequence:

pop <scratch-reg>
lfence
jmp *<scratch-reg>

where <scratch-reg> is some available scratch register, according to the
calling convention of the function being mitigated.

Differential Revision: https://reviews.llvm.org/D75935
  • Loading branch information
scottconstable authored and tstellar committed Jun 24, 2020
1 parent 071acfd commit 6a45895
Show file tree
Hide file tree
Showing 7 changed files with 219 additions and 0 deletions.
1 change: 1 addition & 0 deletions llvm/lib/Target/X86/CMakeLists.txt
Expand Up @@ -52,6 +52,7 @@ set(sources
X86InstrInfo.cpp
X86EvexToVex.cpp
X86LegalizerInfo.cpp
X86LoadValueInjectionRetHardening.cpp
X86MCInstLower.cpp
X86MachineFunctionInfo.cpp
X86MacroFusion.cpp
Expand Down
2 changes: 2 additions & 0 deletions llvm/lib/Target/X86/X86.h
Expand Up @@ -133,6 +133,7 @@ InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
X86Subtarget &,
X86RegisterBankInfo &);

FunctionPass *createX86LoadValueInjectionRetHardeningPass();
FunctionPass *createX86SpeculativeLoadHardeningPass();

void initializeEvexToVexInstPassPass(PassRegistry &);
Expand All @@ -148,6 +149,7 @@ void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86ExpandPseudoPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);

Expand Down
140 changes: 140 additions & 0 deletions llvm/lib/Target/X86/X86LoadValueInjectionRetHardening.cpp
@@ -0,0 +1,140 @@
//===-- X86LoadValueInjectionRetHardening.cpp - LVI RET hardening for x86 --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Description: Replaces every `ret` instruction with the sequence:
/// ```
/// pop <scratch-reg>
/// lfence
/// jmp *<scratch-reg>
/// ```
/// where `<scratch-reg>` is some available scratch register, according to the
/// calling convention of the function being mitigated.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include <bitset>

using namespace llvm;

#define PASS_KEY "x86-lvi-ret"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumFences, "Number of LFENCEs inserted for LVI mitigation");
STATISTIC(NumFunctionsConsidered, "Number of functions analyzed");
STATISTIC(NumFunctionsMitigated, "Number of functions for which mitigations "
"were deployed");

namespace {

class X86LoadValueInjectionRetHardeningPass : public MachineFunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  X86LoadValueInjectionRetHardeningPass() : MachineFunctionPass(ID) {}

  /// Human-readable name shown in -debug-pass / pipeline listings.
  StringRef getPassName() const override {
    return "X86 Load Value Injection (LVI) Ret-Hardening";
  }

  /// Rewrites `ret` instructions in \p MF; see file header for the scheme.
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

char X86LoadValueInjectionRetHardeningPass::ID = 0;

/// Harden every `ret` in \p MF against Load Value Injection.
///
/// Preferred rewrite (when a scratch GR64 register is available under the
/// function's calling convention):
///   pop <scratch> ; lfence ; jmp *<scratch>
/// Fallback (no scratch register, e.g. "no_caller_saved_registers"):
///   insert `shl $0, (%rsp)` + `lfence` immediately before the `ret`, which
///   probes that RSP points to valid, writable stack memory.
///
/// \returns true iff any instruction was inserted or removed.
bool X86LoadValueInjectionRetHardeningPass::runOnMachineFunction(
    MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "***** " << getPassName() << " : " << MF.getName()
                    << " *****\n");
  const X86Subtarget *Subtarget = &MF.getSubtarget<X86Subtarget>();
  if (!Subtarget->useLVIControlFlowIntegrity() || !Subtarget->is64Bit())
    return false; // FIXME: support 32-bit

  // Don't skip functions with the "optnone" attr but participate in opt-bisect
  // (security mitigations must still apply at -O0).
  const Function &F = MF.getFunction();
  if (!F.hasOptNone() && skipFunction(F))
    return false;

  ++NumFunctionsConsidered;
  const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const X86InstrInfo *TII = Subtarget->getInstrInfo();

  // Choose a GR64 scratch register the rewritten epilogue may clobber.
  unsigned ClobberReg = X86::NoRegister;
  std::bitset<X86::NUM_TARGET_REGS> UnclobberableGR64s;
  UnclobberableGR64s.set(X86::RSP); // can't clobber stack pointer
  UnclobberableGR64s.set(X86::RIP); // can't clobber instruction pointer
  UnclobberableGR64s.set(X86::RAX); // used for function return
  UnclobberableGR64s.set(X86::RDX); // used for function return

  // We can clobber any register allowed by the function's calling convention:
  // rule out the callee-saved set, then take the first remaining GR64.
  for (const MCPhysReg *PR = TRI->getCalleeSavedRegs(&MF); auto Reg = *PR; ++PR)
    UnclobberableGR64s.set(Reg);
  for (auto &Reg : X86::GR64RegClass) {
    if (!UnclobberableGR64s.test(Reg)) {
      ClobberReg = Reg;
      break;
    }
  }

  if (ClobberReg != X86::NoRegister) {
    LLVM_DEBUG(dbgs() << "Selected register "
                      << Subtarget->getRegisterInfo()->getRegAsmName(ClobberReg)
                      << " to clobber\n");
  } else {
    LLVM_DEBUG(dbgs() << "Could not find a register to clobber\n");
  }

  bool Modified = false;
  for (auto &MBB : MF) {
    // Guard: calling back() on an empty basic block is undefined behavior.
    if (MBB.empty())
      continue;

    MachineInstr &MI = MBB.back();
    if (MI.getOpcode() != X86::RETQ)
      continue;

    if (ClobberReg != X86::NoRegister) {
      // Replace `retq` with: pop <scratch>; lfence; jmp *<scratch>.
      MBB.erase_instr(&MI);
      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::POP64r))
          .addReg(ClobberReg, RegState::Define)
          .setMIFlag(MachineInstr::FrameDestroy);
      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::LFENCE));
      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::JMP64r))
          .addReg(ClobberReg);
    } else {
      // In case there is no available scratch register, we can still read from
      // RSP to assert that RSP points to a valid page. The write to RSP is
      // also helpful because it verifies that the stack's write permissions
      // are intact. `shl $0, (%rsp)` is a no-op that reads and writes the
      // return-address slot; EFLAGS is marked dead since nothing consumes it.
      MachineInstr *Fence = BuildMI(MBB, MI, DebugLoc(), TII->get(X86::LFENCE));
      addRegOffset(BuildMI(MBB, Fence, DebugLoc(), TII->get(X86::SHL64mi)),
                   X86::RSP, false, 0)
          .addImm(0)
          ->addRegisterDead(X86::EFLAGS, TRI);
    }

    ++NumFences;
    Modified = true;
  }

  if (Modified)
    ++NumFunctionsMitigated;
  return Modified;
}

// Register the pass with the legacy pass manager under the PASS_KEY
// ("x86-lvi-ret") command-line name. The trailing `false, false` mark it as
// neither a CFG-only pass nor an analysis.
INITIALIZE_PASS(X86LoadValueInjectionRetHardeningPass, PASS_KEY,
                "X86 LVI ret hardener", false, false)

/// Factory entry point declared in X86.h; the X86 pass config uses it to
/// schedule this mitigation late in the codegen pipeline.
FunctionPass *llvm::createX86LoadValueInjectionRetHardeningPass() {
  return new X86LoadValueInjectionRetHardeningPass();
}
2 changes: 2 additions & 0 deletions llvm/lib/Target/X86/X86TargetMachine.cpp
Expand Up @@ -82,6 +82,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86Target() {
initializeX86SpeculativeLoadHardeningPassPass(PR);
initializeX86FlagsCopyLoweringPassPass(PR);
initializeX86CondBrFoldingPassPass(PR);
initializeX86LoadValueInjectionRetHardeningPassPass(PR);
initializeX86OptimizeLEAPassPass(PR);
}

Expand Down Expand Up @@ -542,6 +543,7 @@ void X86PassConfig::addPreEmitPass2() {
// Identify valid longjmp targets for Windows Control Flow Guard.
if (TT.isOSWindows())
addPass(createCFGuardLongjmpPass());
addPass(createX86LoadValueInjectionRetHardeningPass());
}

std::unique_ptr<CSEConfigBase> X86PassConfig::getCSEConfig() const {
Expand Down
1 change: 1 addition & 0 deletions llvm/test/CodeGen/X86/O0-pipeline.ll
Expand Up @@ -73,6 +73,7 @@
; CHECK-NEXT: Live DEBUG_VALUE analysis
; CHECK-NEXT: X86 Indirect Thunks
; CHECK-NEXT: Check CFA info and insert CFI instructions if needed
; CHECK-NEXT: X86 Load Value Injection (LVI) Ret-Hardening
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: X86 Assembly Printer
Expand Down
1 change: 1 addition & 0 deletions llvm/test/CodeGen/X86/O3-pipeline.ll
Expand Up @@ -182,6 +182,7 @@
; CHECK-NEXT: Live DEBUG_VALUE analysis
; CHECK-NEXT: X86 Indirect Thunks
; CHECK-NEXT: Check CFA info and insert CFI instructions if needed
; CHECK-NEXT: X86 Load Value Injection (LVI) Ret-Hardening
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: X86 Assembly Printer
Expand Down
72 changes: 72 additions & 0 deletions llvm/test/CodeGen/X86/lvi-hardening-ret.ll
@@ -0,0 +1,72 @@
; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown < %s | FileCheck %s

; Tests for X86 Load Value Injection (LVI) ret-hardening, enabled by the
; "+lvi-cfi" target feature: each `ret` should be replaced by
; `pop <scratch>; lfence; jmp *<scratch>`. When no scratch register is
; available, the pass instead inserts `shl $0, (%rsp)` plus `lfence` ahead of
; an ordinary `retq`.

; A function consisting of nothing but `ret` is still rewritten.
define dso_local void @one_instruction() #0 {
; CHECK-LABEL: one_instruction:
entry:
  ret void
; CHECK-NOT: retq
; CHECK: popq %[[x:[^ ]*]]
; CHECK-NEXT: lfence
; CHECK-NEXT: jmpq *%[[x]]
}

; Function Attrs: noinline nounwind optnone uwtable
; An ordinary function: some scratch register is popped and jumped through.
define dso_local i32 @ordinary_function(i32 %x, i32 %y) #0 {
; CHECK-LABEL: ordinary_function:
entry:
  %x.addr = alloca i32, align 4
  %y.addr = alloca i32, align 4
  store i32 %x, i32* %x.addr, align 4
  store i32 %y, i32* %y.addr, align 4
  %0 = load i32, i32* %x.addr, align 4
  %1 = load i32, i32* %y.addr, align 4
  %add = add nsw i32 %0, %1
  ret i32 %add
; CHECK-NOT: retq
; CHECK: popq %[[x:[^ ]*]]
; CHECK-NEXT: lfence
; CHECK-NEXT: jmpq *%[[x]]
}

; Function Attrs: noinline nounwind optnone uwtable
; With "no_caller_saved_registers" (attribute set #1) no scratch register is
; available, so the pass must fall back to the shl+lfence probe before a
; plain retq.
define dso_local i32 @no_caller_saved_registers_function(i32 %x, i32 %y) #1 {
; CHECK-LABEL: no_caller_saved_registers_function:
entry:
  %x.addr = alloca i32, align 4
  %y.addr = alloca i32, align 4
  store i32 %x, i32* %x.addr, align 4
  store i32 %y, i32* %y.addr, align 4
  %0 = load i32, i32* %x.addr, align 4
  %1 = load i32, i32* %y.addr, align 4
  %add = add nsw i32 %0, %1
  ret i32 %add
; CHECK-NOT: retq
; CHECK: shlq $0, (%{{[^ ]*}})
; CHECK-NEXT: lfence
; CHECK-NEXT: retq
}

; Function Attrs: noinline nounwind optnone uwtable
; Under preserve_mostcc the expected scratch register is %r11 (per the CHECK
; lines below).
define dso_local preserve_mostcc void @preserve_most() #0 {
; CHECK-LABEL: preserve_most:
entry:
  ret void
; CHECK-NOT: retq
; CHECK: popq %r11
; CHECK-NEXT: lfence
; CHECK-NEXT: jmpq *%r11
}

; Function Attrs: noinline nounwind optnone uwtable
; preserve_allcc likewise is expected to select %r11.
define dso_local preserve_allcc void @preserve_all() #0 {
; CHECK-LABEL: preserve_all:
entry:
  ret void
; CHECK-NOT: retq
; CHECK: popq %r11
; CHECK-NEXT: lfence
; CHECK-NEXT: jmpq *%r11
}

attributes #0 = { "target-features"="+lvi-cfi" }
attributes #1 = { "no_caller_saved_registers" "target-features"="+lvi-cfi" }

0 comments on commit 6a45895

Please sign in to comment.