Skip to content

Commit

Permalink
PCC: working end-to-end for static memories!
Browse files Browse the repository at this point in the history
Co-authored-by: Nick Fitzgerald <fitzgen@gmail.com>
  • Loading branch information
cfallin and fitzgen committed Oct 17, 2023
1 parent 725bdd9 commit f8d59bd
Show file tree
Hide file tree
Showing 7 changed files with 199 additions and 32 deletions.
56 changes: 28 additions & 28 deletions cranelift/codegen/src/legalizer/globalvalue.rs
Expand Up @@ -21,7 +21,7 @@ pub fn expand_global_value(
);

match func.global_values[global_value] {
ir::GlobalValueData::VMContext => vmctx_addr(inst, func),
ir::GlobalValueData::VMContext => vmctx_addr(global_value, inst, func),
ir::GlobalValueData::IAddImm {
base,
offset,
Expand Down Expand Up @@ -52,7 +52,7 @@ fn const_vector_scale(inst: ir::Inst, func: &mut ir::Function, ty: ir::Type, isa
}

/// Expand a `global_value` instruction for a vmctx global.
fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function) {
fn vmctx_addr(global_value: ir::GlobalValue, inst: ir::Inst, func: &mut ir::Function) {
// Get the value representing the `vmctx` argument.
let vmctx = func
.special_param(ir::ArgumentPurpose::VMContext)
Expand All @@ -63,6 +63,22 @@ fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function) {
func.dfg.clear_results(inst);
func.dfg.change_to_alias(result, vmctx);
func.layout.remove_inst(inst);

crate::trace!(
"expanding vmctx gv {}: fact = {:?}, vmctx arg = {}",
global_value,
func.global_value_facts[global_value],
vmctx
);

    // If there was a fact on the GV, then propagate it to the vmctx arg
    // blockparam def — but only if that def does not already carry a fact.
if let Some(fact) = &func.global_value_facts[global_value] {
if func.dfg.facts[vmctx].is_none() {
let fact = fact.clone();
func.dfg.facts[vmctx] = Some(fact);
}
}
}

/// Expand a `global_value` instruction for an iadd_imm global.
Expand All @@ -75,19 +91,11 @@ fn iadd_imm_addr(
) {
let mut pos = FuncCursor::new(func).at_inst(inst);

// Get the value for the lhs. For tidiness, expand VMContext here so that we avoid
// `vmctx_addr` which creates an otherwise unneeded value alias.
let lhs = if let ir::GlobalValueData::VMContext = pos.func.global_values[base] {
pos.func
.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter")
} else {
let gv = pos.ins().global_value(global_type, base);
if let Some(fact) = &pos.func.global_value_facts[base] {
pos.func.dfg.facts[gv] = Some(fact.clone());
}
gv
};
// Get the value for the lhs.
let lhs = pos.ins().global_value(global_type, base);
if let Some(fact) = &pos.func.global_value_facts[base] {
pos.func.dfg.facts[lhs] = Some(fact.clone());
}

// Simply replace the `global_value` instruction with an `iadd_imm`, reusing the result value.
pos.func.dfg.replace(inst).iadd_imm(lhs, offset);
Expand All @@ -110,19 +118,11 @@ fn load_addr(
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);

// Get the value for the base. For tidiness, expand VMContext here so that we avoid
// `vmctx_addr` which creates an otherwise unneeded value alias.
let base_addr = if let ir::GlobalValueData::VMContext = pos.func.global_values[base] {
pos.func
.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter")
} else {
let gv = pos.ins().global_value(ptr_ty, base);
if let Some(fact) = &pos.func.global_value_facts[base] {
pos.func.dfg.facts[gv] = Some(fact.clone());
}
gv
};
// Get the value for the base.
let base_addr = pos.ins().global_value(ptr_ty, base);
if let Some(fact) = &pos.func.global_value_facts[base] {
pos.func.dfg.facts[base_addr] = Some(fact.clone());
}

// Perform the load.
pos.func
Expand Down
5 changes: 5 additions & 0 deletions cranelift/wasm/src/code_translator.rs
Expand Up @@ -2811,6 +2811,11 @@ where
let mut flags = MemFlags::new();
flags.set_endianness(ir::Endianness::Little);

if heap.memory_type.is_some() {
// Proof-carrying code is enabled; check this memory access.
flags.set_checked();
}

// The access occurs to the `heap` disjoint category of abstract
// state. This may allow alias analysis to merge redundant loads,
// etc. when heap accesses occur interleaved with other (table,
Expand Down
58 changes: 56 additions & 2 deletions cranelift/wasm/src/code_translator/bounds_checks.rs
Expand Up @@ -23,6 +23,7 @@ use super::Reachability;
use crate::{FuncEnvironment, HeapData, HeapStyle};
use cranelift_codegen::{
cursor::{Cursor, FuncCursor},
ir::pcc::Fact,
ir::{self, condcodes::IntCC, InstBuilder, RelSourceLoc},
};
use cranelift_frontend::FunctionBuilder;
Expand Down Expand Up @@ -56,6 +57,7 @@ where
);
let offset_and_size = offset_plus_size(offset, access_size);
let spectre_mitigations_enabled = env.heap_access_spectre_mitigation();
let pcc = env.proof_carrying_code();

// We need to emit code that will trap (or compute an address that will trap
// when accessed) if
Expand Down Expand Up @@ -95,6 +97,7 @@ where
index,
offset,
spectre_mitigations_enabled,
pcc,
oob,
))
}
Expand Down Expand Up @@ -134,6 +137,7 @@ where
index,
offset,
spectre_mitigations_enabled,
pcc,
oob,
))
}
Expand All @@ -158,6 +162,7 @@ where
index,
offset,
spectre_mitigations_enabled,
pcc,
oob,
))
}
Expand Down Expand Up @@ -187,6 +192,7 @@ where
index,
offset,
spectre_mitigations_enabled,
pcc,
oob,
))
}
Expand Down Expand Up @@ -254,6 +260,7 @@ where
env.pointer_type(),
index,
offset,
pcc,
))
}

Expand Down Expand Up @@ -283,6 +290,7 @@ where
index,
offset,
spectre_mitigations_enabled,
pcc,
oob,
))
}
Expand Down Expand Up @@ -332,6 +340,8 @@ fn explicit_check_oob_condition_and_compute_addr(
offset: u32,
// Whether Spectre mitigations are enabled for heap accesses.
spectre_mitigations_enabled: bool,
// Whether we're emitting PCC facts.
pcc: bool,
// The `i8` boolean value that is non-zero when the heap access is out of
// bounds (and therefore we should trap) and is zero when the heap access is
// in bounds (and therefore we can proceed).
Expand All @@ -342,7 +352,7 @@ fn explicit_check_oob_condition_and_compute_addr(
.trapnz(oob_condition, ir::TrapCode::HeapOutOfBounds);
}

let mut addr = compute_addr(pos, heap, addr_ty, index, offset);
let mut addr = compute_addr(pos, heap, addr_ty, index, offset, pcc);

if spectre_mitigations_enabled {
let null = pos.ins().iconst(addr_ty, 0);
Expand All @@ -364,19 +374,63 @@ fn compute_addr(
addr_ty: ir::Type,
index: ir::Value,
offset: u32,
pcc: bool,
) -> ir::Value {
debug_assert_eq!(pos.func.dfg.value_type(index), addr_ty);

let heap_base = pos.ins().global_value(addr_ty, heap.base);

let pcc_memtype = if pcc {
Some(
heap.memory_type
.expect("A memory type is required when PCC is enabled"),
)
} else {
None
};

if let Some(ty) = pcc_memtype {
pos.func.dfg.facts[heap_base] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: 0,
});
}

let base_and_index = pos.ins().iadd(heap_base, index);

if let Some(ty) = pcc_memtype {
// TODO: handle memory64 as well (pass in the CLIF type of
// `index`).
pos.func.dfg.facts[base_and_index] = Some(Fact::Mem {
ty,
min_offset: 0,
max_offset: u64::from(u32::MAX),
});
}

if offset == 0 {
base_and_index
} else {
// NB: The addition of the offset immediate must happen *before* the
// `select_spectre_guard`, if any. If it happens after, then we
// potentially are letting speculative execution read the whole first
// 4GiB of memory.
pos.ins().iadd_imm(base_and_index, offset as i64)
        //
        // When PCC is enabled, a `Fact::Mem` is attached to the result
        // value below, with the offset range widened by `offset`.
let result = pos.ins().iadd_imm(base_and_index, offset as i64);
if let Some(ty) = pcc_memtype {
pos.func.dfg.facts[result] = Some(Fact::Mem {
ty,
min_offset: u64::from(offset),
                // Safety: this can't overflow -- we are summing two
                // u32-range values in a 64-bit addition. TODO: when
                // memory64 is supported here, the `u32::MAX` bound no
                // longer holds, and we'll need to handle overflow.
max_offset: u64::from(u32::MAX) + u64::from(offset),
});
}
result
}
}

Expand Down
9 changes: 9 additions & 0 deletions cranelift/wasm/src/environ/dummy.rs
Expand Up @@ -261,6 +261,10 @@ impl<'dummy_environment> TargetEnvironment for DummyFuncEnvironment<'dummy_envir
fn heap_access_spectre_mitigation(&self) -> bool {
false
}

fn proof_carrying_code(&self) -> bool {
false
}
}

impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environment> {
Expand Down Expand Up @@ -308,6 +312,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
bound: 0x1_0000_0000,
},
index_type: I32,
memory_type: None,
}))
}

Expand Down Expand Up @@ -711,6 +716,10 @@ impl TargetEnvironment for DummyEnvironment {
fn heap_access_spectre_mitigation(&self) -> bool {
false
}

fn proof_carrying_code(&self) -> bool {
false
}
}

impl<'data> ModuleEnvironment<'data> for DummyEnvironment {
Expand Down
3 changes: 3 additions & 0 deletions cranelift/wasm/src/environ/spec.rs
Expand Up @@ -51,6 +51,9 @@ pub trait TargetEnvironment: TypeConvert {
/// Whether to enable Spectre mitigations for heap accesses.
fn heap_access_spectre_mitigation(&self) -> bool;

/// Whether to add proof-carrying-code facts to verify memory accesses.
fn proof_carrying_code(&self) -> bool;

/// Get the Cranelift integer type to use for native pointers.
///
/// This returns `I64` for 64-bit architectures and `I32` for 32-bit architectures.
Expand Down
5 changes: 4 additions & 1 deletion cranelift/wasm/src/heap.rs
@@ -1,6 +1,6 @@
//! Heaps to implement WebAssembly linear memories.

use cranelift_codegen::ir::{GlobalValue, Type};
use cranelift_codegen::ir::{GlobalValue, MemoryType, Type};
use cranelift_entity::entity_impl;

/// An opaque reference to a [`HeapData`][crate::HeapData].
Expand Down Expand Up @@ -82,6 +82,9 @@ pub struct HeapData {

/// The index type for the heap.
pub index_type: Type,

/// The memory type for the pointed-to memory, if using proof-carrying code.
pub memory_type: Option<MemoryType>,
}

/// Style of heap including style-specific information.
Expand Down

0 comments on commit f8d59bd

Please sign in to comment.