1 change: 1 addition & 0 deletions Cargo.lock
@@ -2745,6 +2745,7 @@ version = "0.0.0"
dependencies = [
"anyhow",
"bitfield-struct 0.10.1",
"bitvec",
"build_rs_guest_arch",
"fs-err",
"getrandom 0.3.2",
1 change: 1 addition & 0 deletions openhcl/hcl/Cargo.toml
@@ -18,6 +18,7 @@ inspect.workspace = true
user_driver.workspace = true

anyhow.workspace = true
bitvec.workspace = true
parking_lot.workspace = true
signal-hook.workspace = true
thiserror.workspace = true
36 changes: 36 additions & 0 deletions openhcl/hcl/src/ioctl.rs
@@ -23,6 +23,7 @@ use crate::protocol::HCL_VMSA_PAGE_OFFSET;
use crate::protocol::MSHV_APIC_PAGE_OFFSET;
use crate::protocol::hcl_intr_offload_flags;
use crate::protocol::hcl_run;
use bitvec::vec::BitVec;
use deferred::RegisteredDeferredActions;
use deferred::push_deferred_action;
use deferred::register_deferred_actions;
@@ -392,6 +393,7 @@ mod ioctls {
const MSHV_VTL_RMPQUERY: u16 = 0x35;
const MSHV_INVLPGB: u16 = 0x36;
const MSHV_TLBSYNC: u16 = 0x37;
const MSHV_KICKCPUS: u16 = 0x38;

#[repr(C)]
#[derive(Copy, Clone)]
@@ -606,6 +608,14 @@ mod ioctls {
MSHV_IOCTL,
MSHV_TLBSYNC
);

ioctl_write_ptr!(
/// Kick CPUs.
hcl_kickcpus,
MSHV_IOCTL,
MSHV_KICKCPUS,
protocol::hcl_kick_cpus
);
}

/// The `/dev/mshv_vtl_low` device for accessing VTL0 memory.
@@ -3241,4 +3251,30 @@ impl Hcl {
hcl_tlbsync(self.mshv_vtl.file.as_raw_fd()).expect("should always succeed");
}
}

/// Causes the specified CPUs to be woken out of a lower VTL.
pub fn kick_cpus(
&self,
cpus: impl IntoIterator<Item = u32>,
cancel_run: bool,
wait_for_other_cpus: bool,
) {
let mut cpu_bitmap: BitVec<u8> = BitVec::from_vec(vec![0; self.vps.len().div_ceil(8)]);
for cpu in cpus {
cpu_bitmap.set(cpu as usize, true);
}

let data = protocol::hcl_kick_cpus {
len: cpu_bitmap.len() as u64,
cpu_mask: cpu_bitmap.as_bitptr().pointer(),
flags: protocol::hcl_kick_cpus_flags::new()
.with_cancel_run(cancel_run)
.with_wait_for_other_cpus(wait_for_other_cpus),
};

// SAFETY: ioctl has no prerequisites.
unsafe {
hcl_kickcpus(self.mshv_vtl.file.as_raw_fd(), &data).expect("should always succeed");
}
}
}
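Note on the mask format `kick_cpus` hands to the kernel: callers pass any `IntoIterator<Item = u32>` of VP indices (for example `hcl.kick_cpus([1, 10], true, true)` on an existing `Hcl` handle), and the function packs them into a byte-backed bitmap whose `len` field counts bits, not bytes. Below is a minimal sketch of that packing, assuming `bitvec`'s default `Lsb0` ordering for `BitVec<u8>`; the VP count and indices are illustrative, and the kernel-side interpretation of the mask is not shown in this diff.

    use bitvec::vec::BitVec;

    fn main() {
        // Build the mask kick_cpus would build for a 16-VP partition,
        // kicking CPUs 1 and 10.
        let vp_count: usize = 16;
        let mut cpu_bitmap: BitVec<u8> = BitVec::from_vec(vec![0; vp_count.div_ceil(8)]);
        cpu_bitmap.set(1, true);
        cpu_bitmap.set(10, true);

        // With Lsb0 ordering, CPU n lands in byte n / 8, bit n % 8:
        // CPU 1 -> byte 0 bit 1, CPU 10 -> byte 1 bit 2.
        assert_eq!(cpu_bitmap.as_raw_slice(), &[0b0000_0010u8, 0b0000_0100][..]);
        // hcl_kick_cpus::len carries the bit count, not the byte count.
        assert_eq!(cpu_bitmap.len(), 16);
    }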
18 changes: 18 additions & 0 deletions openhcl/hcl/src/protocol.rs
@@ -275,3 +275,21 @@ pub struct tdx_vp_context {

const _: () = assert!(core::mem::offset_of!(tdx_vp_context, gpr_list) + 272 == 512);
const _: () = assert!(size_of::<tdx_vp_context>() == 1024);

#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct hcl_kick_cpus_flags {
#[bits(1)]
pub wait_for_other_cpus: bool,
#[bits(1)]
pub cancel_run: bool,
#[bits(62)]
reserved: u64,
}

#[repr(C)]
pub struct hcl_kick_cpus {
pub len: u64,
pub cpu_mask: *const u8,
pub flags: hcl_kick_cpus_flags,
}
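The `#[bitfield(u64)]` attribute (from the `bitfield-struct` crate this package already depends on) places `wait_for_other_cpus` in bit 0 and `cancel_run` in bit 1, with the remaining 62 bits reserved. A hedged sketch of how the generated builder and getters are expected to behave, assuming `bitfield-struct`'s usual generated API (`new`, `with_*`, getters, and `into_bits`):

    // Illustrative only; mirrors how ioctl.rs constructs the flags.
    let flags = hcl_kick_cpus_flags::new()
        .with_cancel_run(true)
        .with_wait_for_other_cpus(false);

    assert!(flags.cancel_run());
    assert!(!flags.wait_for_other_cpus());

    // cancel_run is bit 1 and wait_for_other_cpus is bit 0, so the raw value is 0b10.
    assert_eq!(flags.into_bits(), 0b10);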
32 changes: 15 additions & 17 deletions openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
@@ -4277,21 +4277,19 @@ impl TdxTlbLockFlushAccess<'_> {
) {
match processor_set {
Some(processors) => {
-                self.wake_processors_for_tlb_flush_inner(
-                    target_vtl,
-                    processors.iter().map(|x| x as usize),
-                );
-            }
-            None => {
-                self.wake_processors_for_tlb_flush_inner(target_vtl, 0..self.partition.vps.len())
+                self.wake_processors_for_tlb_flush_inner(target_vtl, processors);
            }
+            None => self.wake_processors_for_tlb_flush_inner(
+                target_vtl,
+                0..(self.partition.vps.len() as u32),
+            ),
}
}

fn wake_processors_for_tlb_flush_inner(
&mut self,
target_vtl: GuestVtl,
-        processors: impl Iterator<Item = usize>,
+        processors: impl IntoIterator<Item = u32>,
) {
// Use SeqCst ordering to ensure that we are observing the most
// up-to-date value from other VPs. Otherwise we might not send a
@@ -4301,15 +4299,15 @@ impl TdxTlbLockFlushAccess<'_> {
// We use a single fence to avoid having to take a SeqCst load
// for each VP.
std::sync::atomic::fence(Ordering::SeqCst);
-        for target_vp in processors {
-            if self.vp_index.index() as usize != target_vp
-                && self.shared.active_vtl[target_vp].load(Ordering::Relaxed) == target_vtl as u8
-            {
-                self.partition.vps[target_vp].wake_vtl2();
-            }
-        }
-
-        // TODO TDX GUEST VSM: We need to wait here until all woken VPs actually enter VTL 2.
+        self.partition.hcl.kick_cpus(
+            processors.into_iter().filter(|&vp| {
+                vp != self.vp_index.index()
+                    && self.shared.active_vtl[vp as usize].load(Ordering::Relaxed)
+                        == target_vtl as u8
+            }),
+            true,
+            true,
+        );
}
}

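The net effect of the mod.rs change: the per-VP `wake_vtl2()` loop is replaced by a single `kick_cpus` ioctl whose target set is computed by the same filter the loop used to apply, skipping the current VP and any VP whose active VTL is not the one being flushed. A standalone sketch of that selection logic, using plain values instead of the per-VP atomics in the real code; the helper name and types are illustrative and not part of the PR.

    /// Illustrative helper: which VP indices should be kicked for a flush
    /// that targets `target_vtl`, given each VP's currently active VTL.
    fn vps_to_kick(
        self_vp: u32,
        active_vtl: &[u8],
        target_vtl: u8,
        processors: impl IntoIterator<Item = u32>,
    ) -> Vec<u32> {
        processors
            .into_iter()
            .filter(|&vp| vp != self_vp && active_vtl[vp as usize] == target_vtl)
            .collect()
    }

    // E.g. with 4 VPs, VP 0 flushing VTL 0 while VPs 1 and 3 sit in VTL 0 and VP 2 in VTL 1:
    // vps_to_kick(0, &[0, 0, 1, 0], 0, 0..4u32) == vec![1, 3]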