Skip to content

Commit

Permalink
Page walk POC
Browse files Browse the repository at this point in the history
  • Loading branch information
kumargu committed Aug 17, 2021
1 parent 5e973ea commit b088770
Show file tree
Hide file tree
Showing 4 changed files with 121 additions and 93 deletions.
1 change: 1 addition & 0 deletions src/gdb_server/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ mod walker;
#[allow(unused_imports)]
use target::*;
pub use util::*;
pub use walker::Walker;

pub type DynResult<T> = Result<T, Box<dyn std::error::Error>>;

Expand Down
13 changes: 13 additions & 0 deletions src/gdb_server/src/target.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ use crate::DynResult;
#[cfg(target_arch = "x86_64")]
pub use kernel::loader::elf::Elf64_Phdr;

use super::{Walker};

pub struct FirecrackerGDBServer {
pub guest_memory: GuestMemoryMmap,

Expand Down Expand Up @@ -121,6 +123,9 @@ impl FirecrackerGDBServer {
) {
Ok(addr) => {
phys_addr = addr;
let (paddr, psize) = Walker::virt_to_phys(linear_addr, &self.guest_memory, &self.guest_state)?;
eprintln!("Linear Address is: {}. Translated addresses using phdrs headers: {} \
Translated Address using page walks: {} Page size {}", linear_addr, phys_addr, paddr, psize);
}
// We don't want to interrupt the whole debugging process because of an invalid address.
// This breakpoint simply won't be hit.
Expand Down Expand Up @@ -384,6 +389,14 @@ impl Target for FirecrackerGDBServer {
/// Function that is called when the user or the GDB client requests a number of val.len
/// bytes from the guest memory at address 'addrs'
fn read_addrs(&mut self, addrs: u64, val: &mut [u8]) -> Result<bool, Self::Error> {
eprintln!("Reached here....");
if let Ok(phy_addr) = Debugger::virt_to_phys(addrs, &self.guest_memory, &self.guest_state, &self.e_phdrs) {
let (paddr, psize) = Walker::virt_to_phys(addrs, &self.guest_memory, &self.guest_state)?;
// eprintln!("{} ", phy_addr);

eprintln!("{} {} {} ", phy_addr, paddr, psize);
}

if let Ok(phys_addr) =
Debugger::virt_to_phys(addrs, &self.guest_memory, &self.guest_state, &self.e_phdrs)
{
Expand Down
19 changes: 17 additions & 2 deletions src/gdb_server/src/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ pub use kernel::loader::elf::{Elf64_Phdr, PT_LOAD};

use super::kvm_bindings::*;
use super::{GuestAddress, GuestMemoryMmap, VcpuFd};
use kernel::loader::elf::elf64_phdr;

// See Chapter 2.5 (Control Registers), Volume 3A in Intel Arch SW Developer's Manual.
// Bit 0 of CR0 register on x86 architecture
Expand Down Expand Up @@ -97,6 +98,7 @@ pub enum DebuggerError {
IoctlError(Error),
InvalidLinearAddress,
UnsupportedPagingStrategy,
PageNotFound
}

impl Display for DebuggerError {
Expand Down Expand Up @@ -180,7 +182,6 @@ impl Debugger {
) -> Result<u64, DebuggerError> {
let mut linear_addr = addr;
let pt_level = Debugger::get_paging_strategy(&guest_state.special_regs);

let mut paddr: u64;
let mut mask: u64;
let mut movem;
Expand Down Expand Up @@ -208,8 +209,16 @@ impl Debugger {
return Err(DebuggerError::MemoryError);
}

// first level checks ....
eprintln!("first level checks Table entry {} {} {} ", addr, paddr, table_entry);

if Debugger::check_entry(table_entry, TABLE_ENTRY_RSVD_BITS[0]).is_err() {
return Debugger::fixup_pointer(addr, e_phdrs);
eprintln!("checking fixup pointer {} {} {} ", addr, paddr, table_entry);
let mut value = Debugger::fixup_pointer(addr, e_phdrs);

eprintln!("value returned from early boot up phase {} {:?} {} ", addr, value, table_entry);

return value;
}

// There is one loop iteration for each page-table level (PDPT, PDT, PT);
Expand Down Expand Up @@ -269,6 +278,9 @@ impl Debugger {
// After each page table iteration we check whether the current entry is valid.
// If that is not the case, we try saving the translation process by skipping
// the page tables altogether and using direct translation through offset subtraction.

eprintln!("Table entry {} {} {} ", addr, paddr, table_entry);

if Debugger::check_entry(table_entry, TABLE_ENTRY_RSVD_BITS[rsvd_idx]).is_err() {
return Debugger::fixup_pointer(addr, e_phdrs);
}
Expand Down Expand Up @@ -317,9 +329,12 @@ impl Debugger {
/// values of bits that that should not be set.
fn check_entry(entry: u64, reserved_bits: u64) -> Result<(), DebuggerError> {
if entry & BIT_P == 0 {
eprintln!("reached here in no bits set");
return Err(DebuggerError::InvalidLinearAddress);
}
if entry & reserved_bits != 0 {
eprintln!("reached here in reserved bits set");

return Err(DebuggerError::InvalidLinearAddress);
}

Expand Down
181 changes: 90 additions & 91 deletions src/gdb_server/src/walker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use crate::vm_memory::Bytes;

use super::{GuestAddress, GuestMemoryMmap};
use super::kvm_bindings::*;
use super::{FullVcpuState, DebuggerError};

// If 1, enable paging and use the § CR3 register, else disable paging.
const CR0_PG_MASK: u64 = 1 << 31;
Expand Down Expand Up @@ -43,110 +44,108 @@ const PAGE_SIZE_4K: u64 = 4 * 1024;
const PAGE_SIZE_2M: u64 = 2 * 1024 * 1024;
const PAGE_SIZE_1G: u64 = 1024 * 1024 * 1024;

/// Errors produced while translating a guest virtual address to a
/// physical address by walking the guest's paging structures.
#[derive(Debug)]
pub enum Error {
    /// The guest's paging configuration is in a mode this walker does not support.
    UnsupportedPagingStrategy,
    /// The linear address could not be translated (e.g. a page-table
    /// entry could not be read from guest memory).
    VirtAddrTranslationError,
}

/// Snapshot of a vCPU's register state used to drive the page walk.
#[derive(Default, Clone)]
pub struct FullVcpuState {
    // General-purpose register state (KVM's kvm_regs layout).
    pub regular_regs: kvm_regs,
    // Special register state (KVM's kvm_sregs layout); the walker reads
    // CR0/CR3/CR4 and EFER from here to pick the paging mode and root table.
    pub special_regs: kvm_sregs,
}

pub type Result<T> = std::result::Result<T, Error>;
pub type Result<T> = std::result::Result<T, DebuggerError>;

//https://github.com/crash-utility/crash/blob/master/qemu.c#L72
fn virt_to_phys(
vaddr: u64,
guest_memory: &GuestMemoryMmap,
guest_state: &FullVcpuState,
) -> Result<(u64, u64)> {
// Moves from one page table entry to next page table entry.
// Moves from one page-table entry to the next: reads the entry that
// `vaddr` selects at `page_level` from the table addressed by `table_entry`.
// `table_entry` carries the table's physical base in its address bits plus
// flag bits, which are masked off before use.
fn walk(
    guest_memory: &GuestMemoryMmap,
    table_entry: u64,
    vaddr: u64,
    page_level: usize,
) -> Result<u64> {
    // Strip flag bits to recover the physical base address of the table.
    let page_number = table_entry & PTE_ADDR_MASK;
    // Entry address = table base + (per-level 9-bit index of vaddr * 8 bytes).
    let paddr = page_number + page_table_offset(vaddr, page_level);
    // Read the 8-byte entry from guest memory; a failed read means the
    // address cannot be translated.
    let next_entry: u64 = guest_memory.read_obj(GuestAddress(paddr))
        .map_err(|_| Error::VirtAddrTranslationError)?;

    Ok(next_entry)
}

// Byte offset of `vaddr` inside its containing page.
// Page sizes are powers of two, so masking with (size - 1) is the same
// as `vaddr % page_size`.
fn page_offset(vaddr: u64, page_size: u64) -> u64 {
    let mask = page_size - 1;
    vaddr & mask
}

// Byte offset of the entry selected by `addr` within the page table at
// the given `level`. Level 1 consumes address bits 12..21, level 2 bits
// 21..30, and so on; the 9-bit index is scaled by 8 (the entry size)
// via the final left shift.
fn page_table_offset(addr: u64, level: usize) -> u64 {
    let shift = 12 + 9 * (level - 1);
    let index = (addr >> shift) & 0x1ff;
    index << 3
}

if guest_state.special_regs.cr0 & CR0_PG_MASK == 0 {
return Ok((vaddr, PAGE_SIZE_4K));
}
pub struct Walker;

if guest_state.special_regs.cr4 & CR4_PAE_MASK == 0 {
return Err(Error::VirtAddrTranslationError);
}

if guest_state.special_regs.efer & MSR_EFER_LMA != 0 {
let mut pg_lvl_5_ent: Option<u64> = None;
let pg_lvl_4_ent;
#[cfg(target_arch = "x86_64")]
impl Walker {
pub fn virt_to_phys(
vaddr: u64,
guest_memory: &GuestMemoryMmap,
guest_state: &FullVcpuState,
) -> Result<(u64, u64)> {
// Moves from one page table entry to next page table entry.
fn walk(
guest_memory: &GuestMemoryMmap,
table_entry: u64,
vaddr: u64,
page_level: usize,
) -> Result<u64> {
let page_number = table_entry & PTE_ADDR_MASK;
let paddr = page_number + page_table_offset(vaddr, page_level);
let next_entry: u64 = guest_memory.read_obj(GuestAddress(paddr))
.map_err(|_| DebuggerError::InvalidLinearAddress)?;

println!(
"level {} vaddr {:x} table-addr {:x} mask {:x} next-entry {:x} offset {:x}",
page_level,
vaddr,
table_entry,
PTE_ADDR_MASK,
next_entry,
page_table_offset(vaddr, page_level)
);

// TODO add some doc when this can happen.
// This can possibly happen when we paging is not enabled during early boot process.
if next_entry & PAGE_PRESENT == 0 {
eprintln!("Page not present");
return Err(DebuggerError::PageNotFound);
}

Ok(next_entry)
}

if guest_state.special_regs.cr4 & CR4_LA57_MASK != 0 {
// 5 level paging enabled
// The first paging structure used for any translation is located at the physical address in CR3
pg_lvl_5_ent = Some(walk(guest_memory, guest_state.special_regs.cr3, vaddr, 5)?);
// Offset of `vaddr` within a page of `page_size` bytes. Because every
// supported page size is a power of two, the low-bit mask below is
// equivalent to `vaddr % page_size`.
fn page_offset(vaddr: u64, page_size: u64) -> u64 {
    let in_page_mask = page_size - 1;
    vaddr & in_page_mask
}

if let Some(ent) = pg_lvl_5_ent {
pg_lvl_4_ent = walk(guest_memory, ent, vaddr, 4)?;
} else {
pg_lvl_4_ent = walk(guest_memory, guest_state.special_regs.cr3, vaddr, 4)?;
// Byte offset of the page-table entry for `addr` inside the table at
// `level`. The 4 KiB page offset occupies bits 0..12; each table level
// then consumes 9 index bits (level 1 -> bits 12..21, level 2 -> 21..30,
// ...). The selected 9-bit index is multiplied by the 8-byte entry size.
fn page_table_offset(addr: u64, level: usize) -> u64 {
    let low_bits = 12 + 9 * (level - 1);
    ((addr >> low_bits) & 0x1ff) * 8
}

//Level 3
let pg_lvl_3_ent = walk(guest_memory, pg_lvl_4_ent, vaddr, 3)?;
// Till now, we have traversed 18 bits or 27 bits (for 5 level paging) and if we see
// PAGE_PSE_MASK set, we clearly have space for a 1G page .
if pg_lvl_3_ent & PAGE_PSE_MASK != 0 {
// Find the page address through the page table entry
let page_addr = pg_lvl_3_ent & PTE_ADDR_MASK;
//Find the offset within the page through the linear address
let offset = page_offset(vaddr, PAGE_SIZE_1G);
//Physical address = page address + page offset
let paddr = page_addr | offset;
return Ok((paddr, PAGE_SIZE_1G));
if guest_state.special_regs.cr0 & CR0_PG_MASK == 0 {
return Ok((vaddr, PAGE_SIZE_4K));
}

//Level 2
let pg_lvl_2_ent = walk(guest_memory, pg_lvl_3_ent, vaddr, 2)?;
if pg_lvl_2_ent & PAGE_PSE_MASK != 0 {
let page_addr = pg_lvl_2_ent & PTE_ADDR_MASK;
let offset = page_offset(vaddr, PAGE_SIZE_2M);
// if guest_state.special_regs.cr4 & CR4_PAE_MASK == 0 {
// // return Err(Error::InvalidLinearAddress);
// return Err(DebuggerError::InvalidState);
// }

if guest_state.special_regs.efer & MSR_EFER_LMA != 0 {
let pg_lvl_4_ent = walk(guest_memory, guest_state.special_regs.cr3, vaddr, 4)?;
//Level 3
let pg_lvl_3_ent = walk(guest_memory, pg_lvl_4_ent, vaddr, 3)?;
// Till now, we have traversed 18 bits or 27 bits (for 5 level paging) and if we see
// PAGE_PSE_MASK set, we clearly have space for a 1G page .
if pg_lvl_3_ent & PAGE_PSE_MASK != 0 {
// Find the page address through the page table entry
let page_addr = pg_lvl_3_ent & PTE_ADDR_MASK;
//Find the offset within the page through the linear address
let offset = page_offset(vaddr, PAGE_SIZE_1G);
//Physical address = page address + page offset
let paddr = page_addr | offset;
return Ok((paddr, PAGE_SIZE_1G));
}

//Level 2
let pg_lvl_2_ent = walk(guest_memory, pg_lvl_3_ent, vaddr, 2)?;
if pg_lvl_2_ent & PAGE_PSE_MASK != 0 {
let page_addr = pg_lvl_2_ent & PTE_ADDR_MASK;
let offset = page_offset(vaddr, PAGE_SIZE_2M);
let paddr = page_addr | offset;
return Ok((paddr, PAGE_SIZE_2M));
}

//Level 1
let pg_lvl_1_ent = walk(guest_memory, pg_lvl_2_ent, vaddr, 1)?;
let page_addr = pg_lvl_1_ent & PTE_ADDR_MASK;
let offset = page_offset(vaddr, PAGE_SIZE_4K);
let paddr = page_addr | offset;
return Ok((paddr, PAGE_SIZE_2M));
return Ok((paddr, PAGE_SIZE_4K));
}

//Level 1
let pg_lvl_1_ent = walk(guest_memory, pg_lvl_2_ent, vaddr, 1)?;
let page_addr = pg_lvl_1_ent & PTE_ADDR_MASK;
let offset = page_offset(vaddr, PAGE_SIZE_2M);
let paddr = page_addr | offset;
return Ok((paddr, PAGE_SIZE_4K));
Err(DebuggerError::InvalidState)
}

Err(Error::VirtAddrTranslationError)
}

0 comments on commit b088770

Please sign in to comment.