YJIT: Define jmp_ptr_bytes on CodeBlock
k0kubun committed Apr 10, 2023
1 parent 99a4cf7 commit 8b5b6ea
Showing 3 changed files with 36 additions and 31 deletions.
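At a glance, the commit turns jmp_ptr_bytes from a pair of #[cfg(target_arch)] free functions into methods on CodeBlock, so the arm64 backend can size its worst-case jump from the code block's own virtual region rather than from the global --yjit-exec-mem-size option. A minimal sketch of the resulting shape, using a hypothetical simplified struct rather than the real YJIT types:

// Hypothetical, simplified sketch of the post-commit shape; the struct and its
// field are stand-ins, not the real YJIT definitions.
pub struct CodeBlock {
    #[allow(dead_code)] // only read by the arm64-style variant below
    virtual_region_bytes: usize, // stand-in for the reserved virtual region size
}

impl CodeBlock {
    // x86_64-style: a fixed worst-case byte count for the jump emitted by jmp_ptr.
    #[cfg(target_arch = "x86_64")]
    pub fn jmp_ptr_bytes(&self) -> usize { 6 }

    // arm64-style: one `b` instruction if a +/-128 MiB branch can span the whole
    // region, otherwise a 4-instruction absolute-address load plus `br`.
    #[cfg(target_arch = "aarch64")]
    pub fn jmp_ptr_bytes(&self) -> usize {
        let num_insns = if self.virtual_region_bytes <= 128 * 1024 * 1024 { 1 } else { 5 };
        num_insns * 4
    }
}

Call sites in asm/mod.rs and both backends then go through cb.jmp_ptr_bytes() (or self.jmp_ptr_bytes()) instead of picking an import with #[cfg].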
15 changes: 8 additions & 7 deletions yjit/src/asm/mod.rs
@@ -2,10 +2,6 @@ use std::cell::RefCell;
use std::fmt;
use std::mem;
use std::rc::Rc;
#[cfg(target_arch = "x86_64")]
use crate::backend::x86_64::jmp_ptr_bytes;
#[cfg(target_arch = "aarch64")]
use crate::backend::arm64::jmp_ptr_bytes;
use crate::core::IseqPayload;
use crate::core::for_each_off_stack_iseq_payload;
use crate::core::for_each_on_stack_iseq_payload;
@@ -123,7 +119,7 @@ impl CodeBlock {
page_size,
write_pos: 0,
past_page_bytes: 0,
page_end_reserve: jmp_ptr_bytes(),
page_end_reserve: 0,
label_addrs: Vec::new(),
label_names: Vec::new(),
label_refs: Vec::new(),
@@ -133,6 +129,7 @@
dropped_bytes: false,
freed_pages,
};
cb.page_end_reserve = cb.jmp_ptr_bytes();
cb.write_pos = cb.page_start();
cb
}
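One detail of the constructor above: because jmp_ptr_bytes is now a &self method, page_end_reserve can no longer be computed inside the struct literal, so it starts at 0 and is filled in once the value exists. A tiny, hypothetical illustration of the same pattern:

// Hypothetical minimal example of "construct first, then derive a field from a
// &self method", mirroring CodeBlock::new above.
struct Block { page_end_reserve: usize }

impl Block {
    fn jmp_ptr_bytes(&self) -> usize { 6 } // stand-in value
    fn new() -> Self {
        let mut b = Block { page_end_reserve: 0 }; // placeholder until &self exists
        b.page_end_reserve = b.jmp_ptr_bytes();    // now the method can be called
        b
    }
}

fn main() {
    assert_eq!(Block::new().page_end_reserve, 6);
}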
@@ -196,7 +193,7 @@ impl CodeBlock {
self.write_pos = dst_pos;
let dst_ptr = self.get_write_ptr();
self.write_pos = src_pos;
self.without_page_end_reserve(|cb| assert!(cb.has_capacity(jmp_ptr_bytes())));
self.without_page_end_reserve(|cb| assert!(cb.has_capacity(cb.jmp_ptr_bytes())));

// Generate jmp_ptr from src_pos to dst_pos
self.without_page_end_reserve(|cb| {
@@ -242,6 +239,10 @@ impl CodeBlock {
self.mem_block.borrow().mapped_region_size()
}

pub fn virtual_region_size(&self) -> usize {
self.mem_block.borrow().virtual_region_size()
}

/// Return the number of code pages that have been mapped by the VirtualMemory.
pub fn num_mapped_pages(&self) -> usize {
// CodeBlock's page size != VirtualMem's page size on Linux,
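The new virtual_region_size() accessor simply forwards to the VirtualMem object behind mem_block; the arm64 backend below consults it instead of get_option!(exec_mem_size) when computing the worst-case jump size. A rough sketch of that delegation, with hypothetical stand-in types:

use std::cell::RefCell;
use std::rc::Rc;

// Hypothetical stand-ins for VirtualMem and CodeBlock, shown only to illustrate
// the delegation: the block borrows its shared memory handle and forwards the query.
struct VirtualMem { size_bytes: usize }

impl VirtualMem {
    fn virtual_region_size(&self) -> usize { self.size_bytes }
}

struct CodeBlock { mem_block: Rc<RefCell<VirtualMem>> }

impl CodeBlock {
    fn virtual_region_size(&self) -> usize {
        self.mem_block.borrow().virtual_region_size()
    }
}

fn main() {
    let mem = Rc::new(RefCell::new(VirtualMem { size_bytes: 64 * 1024 * 1024 }));
    let cb = CodeBlock { mem_block: mem };
    assert_eq!(cb.virtual_region_size(), 64 * 1024 * 1024);
}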
@@ -287,7 +288,7 @@ impl CodeBlock {
if cfg!(debug_assertions) && !cfg!(test) {
// Leave illegal instructions at the beginning of each page to assert
// we're not accidentally crossing page boundaries.
start += jmp_ptr_bytes();
start += self.jmp_ptr_bytes();
}
start
}
42 changes: 22 additions & 20 deletions yjit/src/backend/arm64/mod.rs
@@ -39,22 +39,24 @@ pub const _C_RET_OPND: Opnd = Opnd::Reg(X0_REG);
pub const C_SP_REG: A64Opnd = X31;
pub const C_SP_STEP: i32 = 16;

// The maximum number of bytes that can be generated by emit_jmp_ptr.
pub fn jmp_ptr_bytes() -> usize {
// b instruction's offset is encoded as imm26 times 4. It can jump to
// +/-128MiB, so this can be used when --yjit-exec-mem-size <= 128.
let num_insns = if b_offset_fits_bits(get_option!(exec_mem_size) as i64 / 4) {
1 // b instruction
} else {
5 // 4 instructions to load a 64-bit absolute address + br instruction
};
num_insns * 4
}
impl CodeBlock {
// The maximum number of bytes that can be generated by emit_jmp_ptr.
pub fn jmp_ptr_bytes(&self) -> usize {
// b instruction's offset is encoded as imm26 times 4. It can jump to
// +/-128MiB, so this can be used when --yjit-exec-mem-size <= 128.
let num_insns = if b_offset_fits_bits(self.virtual_region_size() as i64 / 4) {
1 // b instruction
} else {
5 // 4 instructions to load a 64-bit absolute address + br instruction
};
num_insns * 4
}

// The maximum number of instructions that can be generated by emit_conditional_jump.
fn conditional_jump_insns() -> i32 {
// The worst case is instructions for a jump + bcond.
jmp_ptr_bytes() as i32 / 4 + 1
// The maximum number of instructions that can be generated by emit_conditional_jump.
fn conditional_jump_insns(&self) -> i32 {
// The worst case is instructions for a jump + bcond.
self.jmp_ptr_bytes() as i32 / 4 + 1
}
}
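The only behavioral change in jmp_ptr_bytes itself is the input to the reach check: it now asks the CodeBlock for virtual_region_size() instead of reading the --yjit-exec-mem-size option. The +/-128 MiB figure in the comment follows from the `b` encoding: a signed 26-bit instruction offset scaled by 4 bytes per instruction. A small back-of-the-envelope check (illustrative only, not YJIT code):

// Illustrative arithmetic for the comment above: a `b` instruction stores a
// signed 26-bit instruction count, i.e. up to 2^25 instructions of 4 bytes each.
fn b_reach_bytes() -> i64 {
    let max_insns: i64 = 1 << 25; // magnitude of a signed imm26 field
    max_insns * 4                 // 4 bytes per instruction
}

fn main() {
    assert_eq!(b_reach_bytes(), 128 * 1024 * 1024); // 128 MiB in each direction
}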

/// Map Opnd to A64Opnd
@@ -126,8 +128,8 @@ fn emit_jmp_ptr(cb: &mut CodeBlock, dst_ptr: CodePtr, padding: bool) {
// Make sure it's always a consistent number of
// instructions in case it gets patched and has to
// use the other branch.
assert!(num_insns * 4 <= jmp_ptr_bytes());
for _ in num_insns..(jmp_ptr_bytes() / 4) {
assert!(num_insns * 4 <= cb.jmp_ptr_bytes());
for _ in num_insns..(cb.jmp_ptr_bytes() / 4) {
nop(cb);
}
}
@@ -749,8 +751,8 @@ impl Assembler
// We need to make sure we have at least 6 instructions for
// every kind of jump for invalidation purposes, so we're
// going to write out padding nop instructions here.
assert!(num_insns <= conditional_jump_insns());
for _ in num_insns..conditional_jump_insns() { nop(cb); }
assert!(num_insns <= cb.conditional_jump_insns());
for _ in num_insns..cb.conditional_jump_insns() { nop(cb); }
}
},
Target::Label(label_idx) => {
@@ -1093,7 +1095,7 @@ impl Assembler
Insn::RegTemps(_) |
Insn::SpillTemp(_) => (), // just a reg alloc signal, no code
Insn::PadInvalPatch => {
while (cb.get_write_pos().saturating_sub(std::cmp::max(start_write_pos, cb.page_start_pos()))) < jmp_ptr_bytes() && !cb.has_dropped_bytes() {
while (cb.get_write_pos().saturating_sub(std::cmp::max(start_write_pos, cb.page_start_pos()))) < cb.jmp_ptr_bytes() && !cb.has_dropped_bytes() {
nop(cb);
}
}
10 changes: 6 additions & 4 deletions yjit/src/backend/x86_64/mod.rs
@@ -34,8 +34,10 @@ pub const _C_ARG_OPNDS: [Opnd; 6] = [
pub const C_RET_REG: Reg = RAX_REG;
pub const _C_RET_OPND: Opnd = Opnd::Reg(RAX_REG);

// The number of bytes that are generated by jmp_ptr
pub fn jmp_ptr_bytes() -> usize { 6 }
impl CodeBlock {
// The number of bytes that are generated by jmp_ptr
pub fn jmp_ptr_bytes(&self) -> usize { 6 }
}

/// Map Opnd to X86Opnd
impl From<Opnd> for X86Opnd {
@@ -718,8 +720,8 @@ impl Assembler
Insn::SpillTemp(_) => (), // just a reg alloc signal, no code
Insn::PadInvalPatch => {
let code_size = cb.get_write_pos().saturating_sub(std::cmp::max(start_write_pos, cb.page_start_pos()));
if code_size < jmp_ptr_bytes() {
nop(cb, (jmp_ptr_bytes() - code_size) as u32);
if code_size < cb.jmp_ptr_bytes() {
nop(cb, (cb.jmp_ptr_bytes() - code_size) as u32);
}
}
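As on arm64, PadInvalPatch pads a patchable location with nops until it is at least jmp_ptr_bytes() long, so that invalidation can later overwrite it in place with a full jump. The padding amount is just the shortfall, as in this illustrative helper (not the actual backend code):

// Illustrative only: how many nop bytes are needed so a patch point can hold a
// worst-case jump of `jmp_ptr_bytes` bytes.
fn pad_bytes(emitted: usize, jmp_ptr_bytes: usize) -> usize {
    jmp_ptr_bytes.saturating_sub(emitted)
}

fn main() {
    assert_eq!(pad_bytes(2, 6), 4); // 2 bytes emitted so far -> pad with 4 nops
    assert_eq!(pad_bytes(8, 6), 0); // already long enough -> no padding
}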
