diff --git a/hfo2/src/abi.rs b/hfo2/src/abi.rs index 6b9338439..3487f7573 100644 --- a/hfo2/src/abi.rs +++ b/hfo2/src/abi.rs @@ -16,6 +16,7 @@ use crate::types::*; +#[derive(Clone, Copy)] pub enum HfVCpuRunReturn { /// The vCPU has been preempted but still has work to do. If the scheduling /// quantum has not expired, the scheduler MUST call `hf_vcpu_run` on the diff --git a/hfo2/src/addr.rs b/hfo2/src/addr.rs index 6f33aeb0b..ff71560e0 100644 --- a/hfo2/src/addr.rs +++ b/hfo2/src/addr.rs @@ -19,9 +19,6 @@ use core::fmt; use crate::arch::*; use crate::types::*; -// TODO: Refactor type names and remove this. -#[allow(non_camel_case_types)] - // TODO: Some codes (mm.rs, mpool.rs, page.rs and etc.) treats memory address // with primitive types (usually usize.) Refactor them to use `*addr_t` types. diff --git a/hfo2/src/api.rs b/hfo2/src/api.rs index b4b30f9f9..a1adb1810 100644 --- a/hfo2/src/api.rs +++ b/hfo2/src/api.rs @@ -14,7 +14,7 @@ * limitations under the License. */ -use core::mem::{self, MaybeUninit}; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ptr; use core::sync::atomic::Ordering; @@ -30,6 +30,7 @@ use crate::spci::*; use crate::spinlock::*; use crate::std::*; use crate::types::*; +use crate::utils::*; use crate::vm::*; // To eliminate the risk of deadlocks, we define a partial order for the acquisition of locks held @@ -62,13 +63,16 @@ pub unsafe extern "C" fn api_init(ppool: *const MPool) { /// This triggers the scheduling logic to run. Run in the context of secondary /// VM to cause HF_VCPU_RUN to return and the primary VM to regain control of /// the cpu. -unsafe fn api_switch_to_primary( - current: *mut VCpu, +unsafe fn switch_to_primary( + current: &mut VCpuExecutionLocked, mut primary_ret: HfVCpuRunReturn, secondary_state: VCpuStatus, ) -> *mut VCpu { let primary = vm_find(HF_PRIMARY_VM_ID); - let next = vm_get_vcpu(primary, cpu_index((*current).cpu) as spci_vcpu_index_t); + let next = vm_get_vcpu( + primary, + cpu_index(current.get_inner().cpu) as spci_vcpu_index_t, + ); // If the secondary is blocked but has a timer running, sleep until the // timer fires rather than indefinitely. @@ -89,10 +93,18 @@ unsafe fn api_switch_to_primary( } // Set the return value for the primary VM's call to HF_VCPU_RUN. - arch_regs_set_retval(&mut (*next).regs, primary_ret.into_raw()); + // + // The use of `get_mut_unchecked()` is safe because the currently running pCPU implicitly owns + // `next`. Notice that `next` is the vCPU of the primary VM that corresponds to the currently + // running pCPU. + (*next) + .inner + .get_mut_unchecked() + .regs + .set_retval(primary_ret.into_raw()); // Mark the current vcpu as waiting. - (*current).state = secondary_state; + current.get_inner_mut().state = secondary_state; next } @@ -100,26 +112,29 @@ unsafe fn api_switch_to_primary( /// Returns to the primary vm and signals that the vcpu still has work to do so. #[no_mangle] pub unsafe extern "C" fn api_preempt(current: *mut VCpu) -> *mut VCpu { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let ret = HfVCpuRunReturn::Preempted; - api_switch_to_primary(current, ret, VCpuStatus::Ready) + switch_to_primary(&mut current, ret, VCpuStatus::Ready) } /// Puts the current vcpu in wait for interrupt mode, and returns to the primary /// vm. 
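[Editorial note, not part of the patch] Throughout api.rs the raw `current: *mut VCpu` arrives with its execution lock already held by the running pCPU, so the handle built with `VCpuExecutionLocked::from_raw` is wrapped in `ManuallyDrop` to stop its `Drop` impl (which releases that lock) from firing on return. A minimal self-contained sketch of the same idiom, with toy names standing in for the real types:

use core::mem::ManuallyDrop;
use core::sync::atomic::{AtomicBool, Ordering};

struct ToyLock {
    held: AtomicBool,
}

struct ToyGuard<'a>(&'a ToyLock);

impl Drop for ToyGuard<'_> {
    fn drop(&mut self) {
        // Releasing here would be wrong while the caller still logically owns the lock.
        self.0.held.store(false, Ordering::Release);
    }
}

// Mirrors `api_preempt`: borrow the already-held lock without releasing it on return.
fn use_already_held(lock: &ToyLock) {
    let guard = ManuallyDrop::new(ToyGuard(lock));
    // ... operate through `guard` ...
    assert!(guard.0.held.load(Ordering::Relaxed));
    // `guard` is never dropped, so the lock stays held for the caller.
}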
#[no_mangle] pub unsafe extern "C" fn api_wait_for_interrupt(current: *mut VCpu) -> *mut VCpu { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let ret = HfVCpuRunReturn::WaitForInterrupt { // `api_switch_to_primary` always initializes this variable. ns: HF_SLEEP_INDEFINITE, }; - api_switch_to_primary(current, ret, VCpuStatus::BlockedInterrupt) + switch_to_primary(&mut current, ret, VCpuStatus::BlockedInterrupt) } /// Puts the current vCPU in off mode, and returns to the primary VM. #[no_mangle] pub unsafe extern "C" fn api_vcpu_off(current: *mut VCpu) -> *mut VCpu { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let ret = HfVCpuRunReturn::WaitForInterrupt { // `api_switch_to_primary` always initializes this variable. ns: HF_SLEEP_INDEFINITE, @@ -129,7 +144,7 @@ pub unsafe extern "C" fn api_vcpu_off(current: *mut VCpu) -> *mut VCpu { // based on it. arch_timer_disable_current(); - api_switch_to_primary(current, ret, VCpuStatus::Off) + switch_to_primary(&mut current, ret, VCpuStatus::Off) } /// Returns to the primary vm to allow this cpu to be used for other tasks as @@ -138,54 +153,61 @@ pub unsafe extern "C" fn api_vcpu_off(current: *mut VCpu) -> *mut VCpu { /// SpciReturn::Success. #[no_mangle] pub unsafe extern "C" fn api_spci_yield(current: *mut VCpu, next: *mut *mut VCpu) -> SpciReturn { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let ret = HfVCpuRunReturn::Yield; - if (*(*current).vm).id == HF_PRIMARY_VM_ID { + if (*current.get_vcpu().vm).id == HF_PRIMARY_VM_ID { // Noop on the primary as it makes the scheduling decisions. return SpciReturn::Success; } - *next = api_switch_to_primary(current, ret, VCpuStatus::Ready); + *next = switch_to_primary(&mut current, ret, VCpuStatus::Ready); // SPCI_YIELD always returns SPCI_SUCCESS. SpciReturn::Success } -/// Switches to the primary so that it can switch to the target, or kick tit if -/// it is already running on a different physical CPU. -#[no_mangle] -pub unsafe extern "C" fn api_wake_up(current: *mut VCpu, target_vcpu: *mut VCpu) -> *mut VCpu { +unsafe fn wake_up(current: &mut VCpuExecutionLocked, target_vcpu: &VCpu) -> *mut VCpu { let ret = HfVCpuRunReturn::WakeUp { - vm_id: (*(*target_vcpu).vm).id, + vm_id: (*target_vcpu.vm).id, vcpu: vcpu_index(target_vcpu), }; - api_switch_to_primary(current, ret, VCpuStatus::Ready) + switch_to_primary(current, ret, VCpuStatus::Ready) +} + +/// Switches to the primary so that it can switch to the target, or kick tit if +/// it is already running on a different physical CPU. +#[no_mangle] +pub unsafe extern "C" fn api_wake_up(current: *mut VCpu, target_vcpu: *mut VCpu) -> *mut VCpu { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); + wake_up(&mut current, &*target_vcpu) } /// Aborts the vCPU and triggers its VM to abort fully. #[no_mangle] pub unsafe extern "C" fn api_abort(current: *mut VCpu) -> *mut VCpu { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let ret = HfVCpuRunReturn::Aborted; dlog!( "Aborting VM {} vCPU {}\n", - (*(*current).vm).id, - vcpu_index(current) + (*current.get_vcpu().vm).id, + vcpu_index(current.get_vcpu()) ); - if (*(*current).vm).id == HF_PRIMARY_VM_ID { + if (*current.get_vcpu().vm).id == HF_PRIMARY_VM_ID { // TODO: what to do when the primary aborts? - loop { - // Do nothing. 
- } + spin_loop(); } - (*(*current).vm).aborting.store(true, Ordering::Relaxed); + (*current.get_vcpu().vm) + .aborting + .store(true, Ordering::Relaxed); // TODO: free resources once all vCPUs abort. - api_switch_to_primary(current, ret, VCpuStatus::Aborted) + switch_to_primary(&mut current, ret, VCpuStatus::Aborted) } /// Returns the ID of the VM. @@ -227,7 +249,7 @@ pub unsafe extern "C" fn api_vcpu_get_count( /// and can therefore be used by other pcpus. #[no_mangle] pub unsafe extern "C" fn api_regs_state_saved(vcpu: *mut VCpu) { - sl_unlock(&(*vcpu).execution_lock); + (*vcpu).inner.unlock_unchecked(); } /// Assuming that the arguments have already been checked by the caller, injects @@ -242,61 +264,22 @@ pub unsafe extern "C" fn api_regs_state_saved(vcpu: *mut VCpu) { /// TODO: this function was using `goto` originally and it is just /// implemented as copy-paste in Rust. unsafe fn internal_interrupt_inject( - target_vcpu: *mut VCpu, + target_vcpu: &VCpu, intid: intid_t, - current: *mut VCpu, + current: &mut VCpuExecutionLocked, next: *mut *mut VCpu, ) -> i64 { - let intid_index = intid as usize / INTERRUPT_REGISTER_BITS; - let intid_mask = 1u32 << (intid % INTERRUPT_REGISTER_BITS as u32); - let mut ret: i64 = 0; - - sl_lock(&(*target_vcpu).interrupts_lock); - - // We only need to change state and (maybe) trigger a virtual IRQ if it - // is enabled and was not previously pending. Otherwise we can skip - // everything except setting the pending bit. - // - // If you change this logic make sure to update the need_vm_lock logic - // above to match. - if (*target_vcpu).interrupts.enabled[intid_index] - & !(*target_vcpu).interrupts.pending[intid_index] - & intid_mask - == 0 - { - // goto out; - // Either way, make it pending. - (*target_vcpu).interrupts.pending[intid_index] |= intid_mask; - sl_unlock(&(*target_vcpu).interrupts_lock); - return ret; - } - - // Increment the count. - (*target_vcpu).interrupts.enabled_and_pending_count += 1; - - // Only need to update state if there was not already an interrupt enabled - // and pending. - if (*target_vcpu).interrupts.enabled_and_pending_count != 1 { - // goto out; - // Either way, make it pending. - (*target_vcpu).interrupts.pending[intid_index] |= intid_mask; - sl_unlock(&(*target_vcpu).interrupts_lock); - return ret; - } - - if (*(*current).vm).id == HF_PRIMARY_VM_ID { - // If the call came from the primary VM, let it know that it should - // run or kick the target vCPU. - ret = 1; - } else if current != target_vcpu && next != ptr::null_mut() { - *next = api_wake_up(current, target_vcpu); + if target_vcpu.interrupts.lock().inject(intid).is_ok() { + if (*current.get_vcpu().vm).id == HF_PRIMARY_VM_ID { + // If the call came from the primary VM, let it know that it should + // run or kick the target vCPU. + return 1; + } else if current.get_vcpu() as *const _ != target_vcpu as *const _ && !next.is_null() { + *next = wake_up(current, target_vcpu); + } } - // out: - // Either way, make it pending. - (*target_vcpu).interrupts.pending[intid_index] |= intid_mask; - sl_unlock(&(*target_vcpu).interrupts_lock); - return ret; + 0 } /// Prepares the vcpu to run by updating its state and fetching whether a return @@ -307,49 +290,36 @@ unsafe fn internal_interrupt_inject( /// - true if it succeeds to prepare `vcpu` to run. In this case, /// `vcpu.execution_lock` has acquired. 
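[Editorial note, not part of the patch] The `wake_up`/`api_wake_up` split above is the general shape this patch gives to the entry points: the `#[no_mangle]` shim only builds the `VCpuExecutionLocked` handle and the logic moves into a Rust-typed helper. A hedged sketch of that shape, using the invented name `api_do_thing`:

// Rust-typed core: takes the lock handle by reference, so it composes with
// helpers such as `switch_to_primary`.
unsafe fn do_thing(current: &mut VCpuExecutionLocked) -> *mut VCpu {
    switch_to_primary(current, HfVCpuRunReturn::Yield, VCpuStatus::Ready)
}

// Thin C ABI wrapper: wraps the raw pointer and forwards.
#[no_mangle]
pub unsafe extern "C" fn api_do_thing(current: *mut VCpu) -> *mut VCpu {
    let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current));
    do_thing(&mut current)
}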
unsafe fn api_vcpu_prepare_run( - current: *const VCpu, + current: &VCpuExecutionLocked, vcpu: *mut VCpu, - run_ret: *mut HfVCpuRunReturn, -) -> bool { - let ret; - - if !sl_try_lock(&(*vcpu).execution_lock) { + run_ret: HfVCpuRunReturn, +) -> Result { + let mut vcpu_inner = (*vcpu).inner.try_lock().ok_or_else(|| { // vCPU is running or prepared to run on another pCPU. // // It's ok not to return the sleep duration here because the other // physical CPU that is currently running this vCPU will return the // sleep duration if needed. The default return value is - // HfVCpuRunReturn::WaitForInterrupt, so no need to set it explicitly. - return false; - } + // HfVCpuRunReturn::WaitForInterrupt, so no need to set it + // explicitly. + run_ret + })?; if (*(*vcpu).vm).aborting.load(Ordering::Relaxed) { - if (*vcpu).state != VCpuStatus::Aborted { + if vcpu_inner.state != VCpuStatus::Aborted { dlog!( "Aborting VM {} vCPU {}\n", (*(*vcpu).vm).id, vcpu_index(vcpu) ); - (*vcpu).state = VCpuStatus::Aborted; - } - ret = false; - - if !ret { - sl_unlock(&(*vcpu).execution_lock); + vcpu_inner.state = VCpuStatus::Aborted; } - - return ret; + return Err(run_ret); } - match (*vcpu).state { + match vcpu_inner.state { VCpuStatus::Off | VCpuStatus::Aborted => { - ret = false; - - if !ret { - sl_unlock(&(*vcpu).execution_lock); - } - - return ret; + return Err(run_ret); } // A pending message allows the vCPU to run so the message can be @@ -360,19 +330,19 @@ unsafe fn api_vcpu_prepare_run( // dependencies in the common run case meaning the sensitive context // switch performance is consistent. VCpuStatus::BlockedMailbox if (*(*vcpu).vm).inner.lock().try_read() => { - arch_regs_set_retval(&mut (*vcpu).regs, SpciReturn::Success as uintreg_t); + vcpu_inner.regs.set_retval(SpciReturn::Success as uintreg_t); } // Allow virtual interrupts to be delivered. VCpuStatus::BlockedMailbox | VCpuStatus::BlockedInterrupt - if (*vcpu).interrupts.enabled_and_pending_count > 0 => + if (*vcpu).interrupts.lock().enabled_and_pending_count > 0 => { // break; } // The timer expired so allow the interrupt to be delivered. VCpuStatus::BlockedMailbox | VCpuStatus::BlockedInterrupt - if arch_timer_pending(&(*vcpu).regs) => + if vcpu_inner.regs.timer_pending() => { // break; } @@ -380,23 +350,17 @@ unsafe fn api_vcpu_prepare_run( // The vCPU is not ready to run, return the appropriate code to the // primary which called vcpu_run. VCpuStatus::BlockedMailbox | VCpuStatus::BlockedInterrupt => { - if arch_timer_enabled(&(*vcpu).regs) { - let ns = arch_timer_remaining_ns(&mut (*vcpu).regs); - - *run_ret = if (*vcpu).state == VCpuStatus::BlockedMailbox { + let run_ret = if !vcpu_inner.regs.timer_enabled() { + run_ret + } else { + let ns = vcpu_inner.regs.timer_remaining_ns(); + if vcpu_inner.state == VCpuStatus::BlockedMailbox { HfVCpuRunReturn::WaitForMessage { ns } } else { HfVCpuRunReturn::WaitForInterrupt { ns } - }; - } - - ret = false; - - if !ret { - sl_unlock(&(*vcpu).execution_lock); - } - - return ret; + } + }; + return Err(run_ret); } VCpuStatus::Ready => { @@ -405,15 +369,11 @@ unsafe fn api_vcpu_prepare_run( } // It has been decided that the vCPU should be run. - (*vcpu).cpu = (*current).cpu; - - ret = true; + vcpu_inner.cpu = current.get_inner().cpu; - if !ret { - sl_unlock(&(*vcpu).execution_lock); - } - - return ret; + // We want to keep the lock of vcpu.state because we're going to run. + mem::forget(vcpu_inner); + Ok(VCpuExecutionLocked::from_raw(vcpu)) } /// Runs the given vcpu of the given vm. 
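[Editorial note, not part of the patch] `api_vcpu_prepare_run` ends with `mem::forget(vcpu_inner)`, so the execution lock stays held when the vCPU is handed to the context-switch path; the matching release is the `unlock_unchecked` call in `api_regs_state_saved` once the register state has been saved. A self-contained toy sketch of that hand-off protocol:

use core::mem;
use core::sync::atomic::{AtomicBool, Ordering};

struct Lock(AtomicBool);
struct Guard<'a>(&'a Lock);

impl Lock {
    fn lock(&self) -> Guard<'_> {
        while self.0.swap(true, Ordering::Acquire) {}
        Guard(self)
    }
    // Safe only when the caller logically owns the lock, as the running pCPU
    // owns a vCPU's execution lock here.
    unsafe fn unlock_unchecked(&self) {
        self.0.store(false, Ordering::Release);
    }
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        unsafe { self.0.unlock_unchecked() }
    }
}

// Hand-off (api_vcpu_prepare_run / vcpu_lock): keep the lock held past the
// guard's lifetime by forgetting the guard instead of dropping it.
fn hand_off(lock: &Lock) {
    let guard = lock.lock();
    mem::forget(guard);
}

// Matching release once the saved state is consistent (api_regs_state_saved).
unsafe fn release_after_save(lock: &Lock) {
    lock.unlock_unchecked();
}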
@@ -421,17 +381,16 @@ unsafe fn api_vcpu_prepare_run( pub unsafe extern "C" fn api_vcpu_run( vm_id: spci_vm_id_t, vcpu_idx: spci_vcpu_index_t, - current: *const VCpu, + current: *mut VCpu, next: *mut *mut VCpu, ) -> u64 { - let vm; - let vcpu; + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let mut ret = HfVCpuRunReturn::WaitForInterrupt { ns: HF_SLEEP_INDEFINITE, }; // Only the primary VM can switch vcpus. - if (*(*current).vm).id != HF_PRIMARY_VM_ID { + if (*current.get_vcpu().vm).id != HF_PRIMARY_VM_ID { return ret.into_raw(); } @@ -441,7 +400,7 @@ pub unsafe extern "C" fn api_vcpu_run( } // The requested VM must exist. - vm = vm_find(vm_id); + let vm = vm_find(vm_id); if vm.is_null() { return ret.into_raw(); } @@ -452,28 +411,34 @@ pub unsafe extern "C" fn api_vcpu_run( } // Update state if allowed. - vcpu = vm_get_vcpu(vm, vcpu_idx); - if !api_vcpu_prepare_run(current, vcpu, &mut ret) { - return ret.into_raw(); - } + let vcpu = vm_get_vcpu(vm, vcpu_idx); + let mut vcpu_locked = match api_vcpu_prepare_run(¤t, vcpu, ret) { + Ok(locked) => locked, + Err(ret) => return ret.into_raw(), + }; // Inject timer interrupt if timer has expired. It's safe to access // vcpu->regs here because api_vcpu_prepare_run already made sure that // regs_available was true (and then set it to false) before returning // true. - if arch_timer_pending(&mut (*vcpu).regs) { + if vcpu_locked.get_inner().regs.timer_pending() { // Make virtual timer interrupt pending. - internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu, ptr::null_mut()); + internal_interrupt_inject( + &*vcpu, + HF_VIRTUAL_TIMER_INTID, + &mut vcpu_locked, + ptr::null_mut(), + ); // Set the mask bit so the hardware interrupt doesn't fire again. // Ideally we wouldn't do this because it affects what the secondary // vcPU sees, but if we don't then we end up with a loop of the // interrupt firing each time we try to return to the secondary vCPU. - arch_timer_mask(&mut (*vcpu).regs); + vcpu_locked.get_inner_mut().regs.timer_mask(); } // Switch to the vcpu. - *next = vcpu; + *next = vcpu_locked.into_raw() as *const _ as *mut _; // Set a placeholder return code to the scheduler. This will be overwritten // when the switch back to the primary occurs. @@ -489,7 +454,7 @@ pub unsafe extern "C" fn api_vcpu_run( unsafe fn waiter_result( vm_id: spci_vm_id_t, vm_inner: &VmInner, - current: *mut VCpu, + current: &mut VCpuExecutionLocked, next: *mut *mut VCpu, ) -> i64 { let ret = HfVCpuRunReturn::NotifyWaiters; @@ -506,7 +471,7 @@ unsafe fn waiter_result( // Switch back to the primary VM, informing it that there are waiters // that need to be notified. - *next = api_switch_to_primary(current, ret, VCpuStatus::Ready); + *next = switch_to_primary(current, ret, VCpuStatus::Ready); 0 } @@ -527,7 +492,8 @@ pub unsafe extern "C" fn api_vm_configure( current: *mut VCpu, next: *mut *mut VCpu, ) -> i64 { - let vm = (*current).vm; + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); + let vm = current.get_vcpu().vm; // The hypervisor's memory map must be locked for the duration of this // operation to ensure there will be sufficient memory to recover from @@ -544,7 +510,7 @@ pub unsafe extern "C" fn api_vm_configure( } // Tell caller about waiters, if any. 
- waiter_result((*vm).id, &vm_inner, current, next) + waiter_result((*vm).id, &vm_inner, &mut current, next) } /// Copies data from the sender's send buffer to the recipient's receive buffer @@ -558,7 +524,8 @@ pub unsafe extern "C" fn api_spci_msg_send( current: *mut VCpu, next: *mut *mut VCpu, ) -> SpciReturn { - let from = (*current).vm; + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); + let from = current.get_vcpu().vm; // TODO: Refactor the control flow of this function, and make this variable // immutable. @@ -688,7 +655,7 @@ pub unsafe extern "C" fn api_spci_msg_send( // Messages for the primary VM are delivered directly. if (*to).id == HF_PRIMARY_VM_ID { to_inner.set_read(); - *next = api_switch_to_primary(current, primary_ret, VCpuStatus::Ready); + *next = switch_to_primary(&mut current, primary_ret, VCpuStatus::Ready); return ret; } @@ -696,7 +663,7 @@ pub unsafe extern "C" fn api_spci_msg_send( // Return to the primary VM directly or with a switch. if (*from).id != HF_PRIMARY_VM_ID { - *next = api_switch_to_primary(current, primary_ret, VCpuStatus::Ready); + *next = switch_to_primary(&mut current, primary_ret, VCpuStatus::Ready); } return ret; @@ -712,7 +679,8 @@ pub unsafe extern "C" fn api_spci_msg_recv( current: *mut VCpu, next: *mut *mut VCpu, ) -> SpciReturn { - let vm = &*(*current).vm; + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); + let vm = &*current.get_vcpu().vm; let return_code: SpciReturn; let block = attributes.contains(SpciMsgRecvAttributes::BLOCK); @@ -741,7 +709,13 @@ pub unsafe extern "C" fn api_spci_msg_recv( // Don't block if there are enabled and pending interrupts, to match // behaviour of wait_for_interrupt. - if (*current).interrupts.enabled_and_pending_count > 0 { + if current + .get_vcpu() + .interrupts + .lock() + .enabled_and_pending_count + > 0 + { return return_code; } @@ -752,7 +726,7 @@ pub unsafe extern "C" fn api_spci_msg_recv( ns: HF_SLEEP_INDEFINITE, }; - *next = api_switch_to_primary(current, run_return, VCpuStatus::BlockedMailbox); + *next = switch_to_primary(&mut current, run_return, VCpuStatus::BlockedMailbox); } return return_code; @@ -825,7 +799,8 @@ pub unsafe extern "C" fn api_mailbox_waiter_get(vm_id: spci_vm_id_t, current: *c /// hf_mailbox_waiter_get. #[no_mangle] pub unsafe extern "C" fn api_mailbox_clear(current: *mut VCpu, next: *mut *mut VCpu) -> i64 { - let vm = (*current).vm; + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); + let vm = current.get_vcpu().vm; let ret; let mut vm_inner = (*vm).inner.lock(); match vm_inner.get_state() { @@ -836,7 +811,7 @@ pub unsafe extern "C" fn api_mailbox_clear(current: *mut VCpu, next: *mut *mut V ret = -1; } MailboxState::Read => { - ret = waiter_result((*vm).id, &vm_inner, current, next); + ret = waiter_result((*vm).id, &vm_inner, &mut current, next); vm_inner.set_empty(); } } @@ -853,38 +828,11 @@ pub unsafe extern "C" fn api_interrupt_enable( enable: bool, current: *mut VCpu, ) -> i64 { - let intid_index = intid as usize / INTERRUPT_REGISTER_BITS; - let intid_mask = 1u32 << (intid % INTERRUPT_REGISTER_BITS as u32); - - if intid >= HF_NUM_INTIDS { - return -1; - } - - sl_lock(&(*current).interrupts_lock); - if enable { - // If it is pending and was not enabled before, increment the count. 
- if ((*current).interrupts.pending[intid_index] - & !(*current).interrupts.enabled[intid_index] - & intid_mask) - != 0 - { - (*current).interrupts.enabled_and_pending_count += 1; - } - (*current).interrupts.enabled[intid_index] |= intid_mask; + if (*current).interrupts.lock().enable(intid, enable).is_ok() { + 0 } else { - // If it is pending and was enabled before, decrement the count. - if ((*current).interrupts.pending[intid_index] - & (*current).interrupts.enabled[intid_index] - & intid_mask) - != 0 - { - (*current).interrupts.enabled_and_pending_count -= 1; - } - (*current).interrupts.enabled[intid_index] &= !intid_mask; + -1 } - - sl_unlock(&(*current).interrupts_lock); - 0 } /// Returns the ID of the next pending interrupt for the calling vCPU, and @@ -892,27 +840,7 @@ pub unsafe extern "C" fn api_interrupt_enable( /// HF_INVALID_INTID if there are no pending interrupts. #[no_mangle] pub unsafe extern "C" fn api_interrupt_get(current: *mut VCpu) -> intid_t { - let mut first_interrupt = HF_INVALID_INTID; - - // Find the first enabled pending interrupt ID, returns it, and - // deactive it. - sl_lock(&(*current).interrupts_lock); - for i in 0..(HF_NUM_INTIDS as usize / INTERRUPT_REGISTER_BITS) { - let enabled_and_pending = - (*current).interrupts.enabled[i] & (*current).interrupts.pending[i]; - if enabled_and_pending != 0 { - let bit_index = enabled_and_pending.trailing_zeros(); - - // Mark it as no longer pending and decrement the count. - (*current).interrupts.pending[i] &= !(1u32 << bit_index); - (*current).interrupts.enabled_and_pending_count -= 1; - first_interrupt = (i * INTERRUPT_REGISTER_BITS) as u32 + bit_index; - break; - } - } - - sl_unlock(&(*current).interrupts_lock); - first_interrupt + (*current).interrupts.lock().get() } /// Returns whether the current vCPU is allowed to inject an interrupt into the @@ -945,6 +873,7 @@ pub unsafe extern "C" fn api_interrupt_inject( current: *mut VCpu, next: *mut *mut VCpu, ) -> i64 { + let mut current = ManuallyDrop::new(VCpuExecutionLocked::from_raw(current)); let target_vm = vm_find(target_vm_id); if intid >= HF_NUM_INTIDS { @@ -960,7 +889,7 @@ pub unsafe extern "C" fn api_interrupt_inject( return -1; } - if !is_injection_allowed(target_vm_id, &*current) { + if !is_injection_allowed(target_vm_id, current.get_vcpu()) { return -1; } @@ -971,10 +900,10 @@ pub unsafe extern "C" fn api_interrupt_inject( intid, target_vm_id, target_vcpu_idx, - (*(*current).vm).id, - (*(*current).cpu).id + (*current.get_vcpu().vm).id, + (*current.get_inner().cpu).id ); - internal_interrupt_inject(target_vcpu, intid, current, next) + internal_interrupt_inject(&*target_vcpu, intid, &mut current, next) } /// Clears a region of physical memory by overwriting it with zeros. The data is @@ -988,7 +917,10 @@ fn clear_memory(begin: paddr_t, end: paddr_t, ppool: &MPool) -> bool { // mapping of the whole range. Such an approach will limit the // changes to stage-1 tables and will allow only local invalidation. - if hypervisor_ptable.identity_map(begin, end, Mode::W, ppool).is_none() { + if hypervisor_ptable + .identity_map(begin, end, Mode::W, ppool) + .is_none() + { // TODO: partial defrag of failed range. // Recover any memory consumed in failed mapping. 
hypervisor_ptable.defrag(ppool); diff --git a/hfo2/src/arch/aarch64.rs b/hfo2/src/arch/aarch64.rs index 2405243d6..c0f347194 100644 --- a/hfo2/src/arch/aarch64.rs +++ b/hfo2/src/arch/aarch64.rs @@ -103,7 +103,13 @@ const REGS_GIC: usize = REGS_FREGS + 528; pub fn arch_cpu_module_init() { assert_eq!(offset_of!(Cpu, id), CPU_ID); assert_eq!(offset_of!(Cpu, stack_bottom), CPU_STACK_BOTTOM); - assert_eq!(offset_of!(VCpu, regs), VCPU_REGS); + // assert_eq!( + // offset_of!(VCpu, inner) + // + 8 // expected value of offset_of!(SpinLock, data), but it + // // is not working. see Gilnaa/memoffset#21. + // + offset_of!(VCpuInner, regs), + // VCPU_REGS + // ); assert_eq!(offset_of!(ArchRegs, lazy), REGS_LAZY); assert_eq!(offset_of!(ArchRegs, fp), REGS_FREGS); assert_eq!(offset_of!(ArchRegs, gic_ich_hcr_el2), REGS_GIC); diff --git a/hfo2/src/arch/mod.rs b/hfo2/src/arch/mod.rs index 28ca0195c..08b4b7ac6 100644 --- a/hfo2/src/arch/mod.rs +++ b/hfo2/src/arch/mod.rs @@ -34,7 +34,7 @@ extern "C" { /// Returns the number of nanoseconds remaining on the virtual timer as /// stored in the given `ArchRegs`, or 0 if it has already expired. This is /// undefined if the timer is not enabled. - pub fn arch_timer_remaining_ns(regs: *mut ArchRegs) -> u64; + pub fn arch_timer_remaining_ns(regs: *const ArchRegs) -> u64; /// Returns whether the timer is ready to fire: i.e. it is enabled, not /// masked, and the condition is met. diff --git a/hfo2/src/cpu.rs b/hfo2/src/cpu.rs index c3aff2e75..31f591cda 100644 --- a/hfo2/src/cpu.rs +++ b/hfo2/src/cpu.rs @@ -14,7 +14,7 @@ * limitations under the License. */ -use core::mem::{self, MaybeUninit}; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ptr; use crate::addr::*; @@ -55,7 +55,7 @@ extern "C" { /// /// This function must only be called on an arch_regs that is known not be in use /// by any other physical CPU. - pub fn arch_regs_set_retval(r: *mut ArchRegs, v: uintreg_t); + fn arch_regs_set_retval(r: *mut ArchRegs, v: uintreg_t); } const STACK_SIZE: usize = PAGE_SIZE; @@ -95,6 +95,130 @@ pub struct Interrupts { pub enabled_and_pending_count: u32, } +impl Interrupts { + pub fn id_to_index(intid: intid_t) -> Result<(usize, u32), ()> { + if intid >= HF_NUM_INTIDS { + return Err(()); + } + + let intid_index = intid as usize / INTERRUPT_REGISTER_BITS; + let intid_mask = 1u32 << (intid % INTERRUPT_REGISTER_BITS as u32); + + Ok((intid_index, intid_mask)) + } + + /// injects a virtual interrupt of the given ID into the given target vCPU. + /// Returns: + /// - None if no further action is needed. + /// - Some(()) if the vcpu had have no pending interrupt before, thus + /// proper scheduling is required. + pub fn inject(&mut self, intid: intid_t) -> Result<(), ()> { + let (intid_index, intid_mask) = Self::id_to_index(intid)?; + + // Make it pending. + let pending = self.pending[intid_index]; + self.pending[intid_index] |= intid_mask; + + // We only need to change state and (maybe) trigger a virtual IRQ if it + // is enabled and was not previously pending. Otherwise we can skip + // everything except setting the pending bit. + // + // If you change this logic make sure to update the need_vm_lock logic + // above to match. + if self.enabled[intid_index] & !pending & intid_mask == 0 { + return Err(()); + } + + // Increment the count. + self.enabled_and_pending_count += 1; + + // Only need to update state if there was not already an + // interrupt enabled and pending. 
+ if self.enabled_and_pending_count != 1 { + Err(()) + } else { + Ok(()) + } + } + + /// Enables or disables a given interrupt ID for the calling vCPU. + pub fn enable(&mut self, intid: intid_t, enable: bool) -> Result<(), ()> { + let (intid_index, intid_mask) = Self::id_to_index(intid)?; + + if enable { + // If it is pending and was not enabled before, increment the count. + if (self.pending[intid_index] & !self.enabled[intid_index] & intid_mask) != 0 { + self.enabled_and_pending_count += 1; + } + self.enabled[intid_index] |= intid_mask; + } else { + // If it is pending and was enabled before, decrement the count. + if (self.pending[intid_index] & self.enabled[intid_index] & intid_mask) != 0 { + self.enabled_and_pending_count -= 1; + } + self.enabled[intid_index] &= !intid_mask; + } + + Ok(()) + } + + /// Returns the ID of the next pending interrupt for the calling vCPU, and + /// acknowledges it (i.e. marks it as no longer pending). Returns + /// HF_INVALID_INTID if there are no pending interrupts. + pub fn get(&mut self) -> intid_t { + // Find the first enabled pending interrupt ID, returns it, and + // deactive it. + for i in 0..(HF_NUM_INTIDS as usize / INTERRUPT_REGISTER_BITS) { + let enabled_and_pending = self.enabled[i] & self.pending[i]; + if enabled_and_pending != 0 { + let bit_index = enabled_and_pending.trailing_zeros(); + + // Mark it as no longer pending and decrement the count. + self.pending[i] &= !(1u32 << bit_index); + self.enabled_and_pending_count -= 1; + return (i * INTERRUPT_REGISTER_BITS) as u32 + bit_index; + } + } + + HF_INVALID_INTID + } +} + +impl ArchRegs { + /// Reset the register values other than the PC and argument which are set + /// with `arch_regs_set_pc_arg()`. + pub fn reset(&mut self, is_primary: bool, vm: &Vm, vcpu_id: cpu_id_t) { + unsafe { arch_regs_reset(self, is_primary, vm.id, vcpu_id, vm.get_ptable_raw()) } + } + + /// Updates the register holding the return value of a function. + pub fn set_retval(&mut self, v: uintreg_t) { + unsafe { arch_regs_set_retval(self, v) } + } + + /// Updates the given registers so that when a vcpu runs, it starts off at + /// the given address (pc) with the given argument. + pub fn set_pc_arg(&mut self, pc: ipaddr_t, arg: uintreg_t) { + unsafe { arch_regs_set_pc_arg(self, pc, arg) } + } + + pub fn timer_mask(&mut self) { + unsafe { arch_timer_mask(self) } + } + + pub fn timer_enabled(&self) -> bool { + unsafe { arch_timer_enabled(self) } + } + + pub fn timer_remaining_ns(&self) -> u64 { + unsafe { arch_timer_remaining_ns(self) } + } + + pub fn timer_pending(&self) -> bool { + unsafe { arch_timer_pending(self) } + } +} + #[repr(C)] pub struct VCpuFaultInfo { ipaddr: ipaddr_t, @@ -103,34 +227,81 @@ pub struct VCpuFaultInfo { mode: Mode, } -#[repr(C)] -pub struct VCpu { - /// Protects accesses to vCPU's state and architecture registers. If a - /// vCPU is running, its execution lock is logically held by the - /// running pCPU. - pub execution_lock: RawSpinLock, +pub struct VCpuInner { + /// The state is only changed in the context of the vCPU being run. This + /// ensures the scheduler can easily keep track of the vCPU state as + /// transitions are indicated by the return code from the run call. + pub state: VCpuStatus, + pub cpu: *mut Cpu, + pub regs: ArchRegs, +} - /// Protects accesses to vCPU's interrupts. - pub interrupts_lock: RawSpinLock, +impl VCpuInner { + /// Initialise the registers for the given vCPU and set the state to + /// VCpuStatus::Ready. 
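[Editorial note, not part of the patch] A hedged, test-style sketch of how the new `Interrupts` methods compose, assuming a fresh interrupt state and a valid interrupt ID; `interrupts_example` is an invented name:

fn interrupts_example(interrupts: &SpinLock<Interrupts>) {
    let mut guard = interrupts.lock();

    // Enable the virtual timer interrupt, then inject it. The first
    // enabled-and-pending interrupt makes `inject` return Ok(()), which is
    // the caller's cue to schedule (or kick) the target vCPU.
    guard.enable(HF_VIRTUAL_TIMER_INTID, true).unwrap();
    if guard.inject(HF_VIRTUAL_TIMER_INTID).is_ok() {
        // wake up / run the target vCPU
    }

    // `get` acknowledges the pending interrupt and decrements the count.
    assert_eq!(guard.get(), HF_VIRTUAL_TIMER_INTID);
    assert_eq!(guard.get(), HF_INVALID_INTID);
}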
The caller must hold the vCPU execution lock while + /// calling this. + pub fn on(&mut self, entry: ipaddr_t, arg: uintreg_t) { + self.regs.set_pc_arg(entry, arg); + self.state = VCpuStatus::Ready; + } - /// The state is only changed in the context of the vCPU being run. This ensures the scheduler - /// can easily keep track of the vCPU state as transitions are indicated by the return code from - /// the run call. - pub state: VCpuStatus, + /// Check whether self is an off state, for the purpose of turning vCPUs on + /// and off. Note that aborted still counts as on in this context. + pub fn is_off(&self) -> bool { + // Aborted still counts as ON for the purposes of PSCI, because according to the PSCI + // specification (section 5.7.1) a core is only considered to be off if it has been turned + // off with a CPU_OFF call or hasn't yet been turned on with a CPU_ON call. + self.state == VCpuStatus::Off + } +} - pub cpu: *mut Cpu, +#[repr(C)] +pub struct VCpu { pub vm: *mut Vm, - pub regs: ArchRegs, - pub interrupts: Interrupts, + + /// If a vCPU is running, its lock is logically held by the running pCPU. + pub inner: SpinLock, + pub interrupts: SpinLock, } /// Encapsulates a vCPU whose lock is held. #[repr(C)] -#[derive(Copy, Clone)] pub struct VCpuExecutionLocked { vcpu: *mut VCpu, } +impl Drop for VCpuExecutionLocked { + fn drop(&mut self) { + unsafe { + (*self.vcpu).inner.unlock_unchecked(); + } + } +} + +impl VCpuExecutionLocked { + pub unsafe fn from_raw(vcpu: *mut VCpu) -> Self { + Self { vcpu } + } + + pub fn into_raw(self) -> *mut VCpu { + let ret = self.vcpu; + mem::forget(self); + ret + } + + pub fn get_vcpu(&self) -> &VCpu { + unsafe { &*self.vcpu } + } + + pub fn get_inner(&self) -> &VCpuInner { + unsafe { (*self.vcpu).inner.get_unchecked() } + } + + pub fn get_inner_mut(&mut self) -> &mut VCpuInner { + unsafe { (*self.vcpu).inner.get_mut_unchecked() } + } +} + // TODO: Update alignment such that cpus are in different cache lines. #[repr(C)] pub struct Cpu { @@ -141,15 +312,8 @@ pub struct Cpu { /// `pub` here is only required by `arch_cpu_module_init`. pub stack_bottom: *mut c_void, - /// Enabling/disabling irqs are counted per-cpu. They are enabled when the count is zero, and - /// disabled when it's non-zero. - irq_disable_count: u32, - - /// See api.c for the partial ordering on locks. - lock: RawSpinLock, - /// Determines whether or not the cpu is currently on. - is_on: bool, + is_on: SpinLock, } // unsafe impl Sync for Cpu {} @@ -171,12 +335,11 @@ extern "C" { static mut cpus: MaybeUninit<[Cpu; MAX_CPUS]>; } -static mut cpu_count: u32 = 1; +static mut CPU_COUNT: u32 = 1; #[no_mangle] pub unsafe extern "C" fn cpu_init(c: *mut Cpu) { // TODO: Assumes that c is zeroed out already. - sl_init(&mut (*c).lock); } #[no_mangle] @@ -187,14 +350,14 @@ pub unsafe extern "C" fn cpu_module_init(cpu_ids: *mut cpu_id_t, count: usize) { arch_cpu_module_init(); - cpu_count = count as u32; + CPU_COUNT = count as u32; // Initialize CPUs with the IDs from the configuration passed in. The // CPUs after the boot CPU are initialized in reverse order. The boot // CPU is initialized when it is found or in place of the last CPU if it // is not found. 
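[Editorial note, not part of the patch] With `VCpuExecutionLocked::get_inner_mut` and the safe `ArchRegs` wrappers, mutation of the saved registers is only reachable through the lock-holding handle, so holding the execution lock is now visible in the types. A small hedged sketch (`reply_success` is an invented name):

fn reply_success(current: &mut VCpuExecutionLocked) {
    // `get_inner_mut` exists only on the lock-holding handle, so this write
    // to the saved registers cannot race with the vCPU running elsewhere.
    current
        .get_inner_mut()
        .regs
        .set_retval(SpciReturn::Success as uintreg_t);
}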
- j = cpu_count; - for i in 0..cpu_count { + j = CPU_COUNT; + for i in 0..CPU_COUNT { let c: *mut Cpu; let id: cpu_id_t = *cpu_ids.offset(i as isize); @@ -233,19 +396,15 @@ pub unsafe extern "C" fn cpu_index(c: *mut Cpu) -> usize { #[no_mangle] pub unsafe extern "C" fn cpu_on(c: *mut Cpu, entry: ipaddr_t, arg: uintreg_t) -> bool { let prev: bool; - - sl_lock(&(*c).lock); - prev = (*c).is_on; - (*c).is_on = true; - sl_unlock(&(*c).lock); + let mut is_on = (*c).is_on.lock(); + prev = *is_on; + *is_on = true; if !prev { let vm = vm_find(HF_PRIMARY_VM_ID); let vcpu = vm_get_vcpu(vm, cpu_index(c) as u16); - let mut vcpu_execution_locked = vcpu_lock(vcpu); - vcpu_on(vcpu_execution_locked, entry, arg); - vcpu_unlock(&mut vcpu_execution_locked); + (*vcpu).inner.lock().on(entry, arg); } prev @@ -254,15 +413,13 @@ pub unsafe extern "C" fn cpu_on(c: *mut Cpu, entry: ipaddr_t, arg: uintreg_t) -> /// Prepares the CPU for turning itself off. #[no_mangle] pub unsafe extern "C" fn cpu_off(c: *mut Cpu) { - sl_lock(&(*c).lock); - (*c).is_on = true; - sl_unlock(&(*c).lock); + *((*c).is_on.lock()) = false; } /// Searches for a CPU based on its id. #[no_mangle] pub unsafe extern "C" fn cpu_find(id: cpu_id_t) -> *mut Cpu { - for i in 0usize..cpu_count as usize { + for i in 0usize..CPU_COUNT as usize { if cpus.get_ref()[i].id == id { return &mut cpus.get_mut()[i]; } @@ -274,7 +431,7 @@ pub unsafe extern "C" fn cpu_find(id: cpu_id_t) -> *mut Cpu { /// Locks the given vCPU and updates `locked` to hold the newly locked vCPU. #[no_mangle] pub unsafe extern "C" fn vcpu_lock(vcpu: *mut VCpu) -> VCpuExecutionLocked { - sl_lock(&(*vcpu).execution_lock); + mem::forget((*vcpu).inner.lock()); VCpuExecutionLocked { vcpu } } @@ -282,29 +439,29 @@ pub unsafe extern "C" fn vcpu_lock(vcpu: *mut VCpu) -> VCpuExecutionLocked { /// Tries to lock the given vCPU, and updates `locked` if succeed. #[no_mangle] pub unsafe extern "C" fn vcpu_try_lock(vcpu: *mut VCpu, locked: *mut VCpuExecutionLocked) -> bool { - if sl_try_lock(&(*vcpu).execution_lock) { - *locked = VCpuExecutionLocked { vcpu }; - true - } else { - false - } + (*vcpu) + .inner + .try_lock() + .map(|guard| { + mem::forget(guard); + ptr::write(locked, VCpuExecutionLocked { vcpu }); + }) + .is_some() } /// Unlocks a vCPU previously locked with vcpu_lock, and updates `locked` to /// reflect the fact that the vCPU is no longer locked. #[no_mangle] pub unsafe extern "C" fn vcpu_unlock(locked: *mut VCpuExecutionLocked) { - sl_unlock(&(*(*locked).vcpu).execution_lock); + drop(ptr::read(locked)); (*locked).vcpu = ptr::null_mut(); } #[no_mangle] pub unsafe extern "C" fn vcpu_init(vcpu: *mut VCpu, vm: *mut Vm) { memset_s(vcpu as _, mem::size_of::(), 0, mem::size_of::()); - sl_init(&mut (*vcpu).execution_lock); - sl_init(&mut (*vcpu).interrupts_lock); (*vcpu).vm = vm; - (*vcpu).state = VCpuStatus::Off; + (*vcpu).inner.get_mut_unchecked().state = VCpuStatus::Off; } /// Initialise the registers for the given vCPU and set the state to @@ -312,8 +469,8 @@ pub unsafe extern "C" fn vcpu_init(vcpu: *mut VCpu, vm: *mut Vm) { /// calling this. 
#[no_mangle] pub unsafe extern "C" fn vcpu_on(vcpu: VCpuExecutionLocked, entry: ipaddr_t, arg: uintreg_t) { - arch_regs_set_pc_arg(&mut (*vcpu.vcpu).regs, entry, arg); - (*vcpu.vcpu).state = VCpuStatus::Ready; + let mut vcpu = ManuallyDrop::new(vcpu); + vcpu.get_inner_mut().on(entry, arg); } #[no_mangle] @@ -326,12 +483,12 @@ pub unsafe extern "C" fn vcpu_index(vcpu: *const VCpu) -> spci_vcpu_index_t { #[no_mangle] pub unsafe extern "C" fn vcpu_get_regs(vcpu: *mut VCpu) -> *mut ArchRegs { - &mut (*vcpu).regs + &mut (*vcpu).inner.get_mut_unchecked().regs } #[no_mangle] pub unsafe extern "C" fn vcpu_get_regs_const(vcpu: *const VCpu) -> *const ArchRegs { - &(*vcpu).regs + &(*vcpu).inner.get_mut_unchecked().regs } #[no_mangle] @@ -341,36 +498,27 @@ pub unsafe extern "C" fn vcpu_get_vm(vcpu: *mut VCpu) -> *mut Vm { #[no_mangle] pub unsafe extern "C" fn vcpu_get_cpu(vcpu: *mut VCpu) -> *mut Cpu { - (*vcpu).cpu + (*vcpu).inner.get_mut_unchecked().cpu } #[no_mangle] pub unsafe extern "C" fn vcpu_set_cpu(vcpu: *mut VCpu, cpu: *mut Cpu) { - (*vcpu).cpu = cpu; + (*vcpu).inner.get_mut_unchecked().cpu = cpu; } #[no_mangle] pub unsafe extern "C" fn vcpu_get_interrupts(vcpu: *mut VCpu) -> *mut Interrupts { - &mut (*vcpu).interrupts + (*vcpu).interrupts.get_mut_unchecked() } -/// Check whether the given vcpu_state is an off state, for the purpose of +/// Check whether the given vcpu_inner is an off state, for the purpose of /// turning vCPUs on and off. Note that aborted still counts as on in this /// context. #[no_mangle] pub unsafe extern "C" fn vcpu_is_off(vcpu: VCpuExecutionLocked) -> bool { - match (*vcpu.vcpu).state { - VCpuStatus::Off => true, - _ => - // Aborted still counts as ON for the purposes of PSCI, - // because according to the PSCI specification (section - // 5.7.1) a core is only considered to be off if it has - // been turned off with a CPU_OFF call or hasn't yet - // been turned on with a CPU_ON call. - { - false - } - } + let vcpu = ManuallyDrop::new(vcpu); + let result = (*vcpu.vcpu).inner.get_mut_unchecked().is_off(); + result } /// Starts a vCPU of a secondary VM. @@ -383,30 +531,21 @@ pub unsafe extern "C" fn vcpu_secondary_reset_and_start( entry: ipaddr_t, arg: uintreg_t, ) -> bool { - let mut vcpu_execution_locked; - let vm = (*vcpu).vm; - let vcpu_was_off; + let vm = &*(*vcpu).vm; - assert!((*vm).id != HF_PRIMARY_VM_ID); + assert!(vm.id != HF_PRIMARY_VM_ID); - vcpu_execution_locked = vcpu_lock(vcpu); - vcpu_was_off = vcpu_is_off(vcpu_execution_locked); + let mut state = (*vcpu).inner.lock(); + let vcpu_was_off = state.is_off(); if vcpu_was_off { // Set vCPU registers to a clean state ready for boot. As this // is a secondary which can migrate between pCPUs, the ID of the // vCPU is defined as the index and does not match the ID of the // pCPU it is running on. - arch_regs_reset( - &mut (*vcpu).regs, - false, - (*vm).id, - vcpu_index(vcpu) as cpu_id_t, - (*vm).get_ptable_raw(), - ); - vcpu_on(vcpu_execution_locked, entry, arg); + state.regs.reset(false, vm, vcpu_index(vcpu) as cpu_id_t); + state.on(entry, arg); } - vcpu_unlock(&mut vcpu_execution_locked); vcpu_was_off } diff --git a/hfo2/src/dlog.rs b/hfo2/src/dlog.rs index bf13b901e..98f41a8a6 100644 --- a/hfo2/src/dlog.rs +++ b/hfo2/src/dlog.rs @@ -17,7 +17,6 @@ use core::fmt; use crate::spinlock::*; -use crate::types::*; extern "C" { fn plat_console_putchar(c: u8); diff --git a/hfo2/src/mm.rs b/hfo2/src/mm.rs index 71489519e..f4f6fd270 100644 --- a/hfo2/src/mm.rs +++ b/hfo2/src/mm.rs @@ -24,7 +24,7 @@ //! //! 
We assume that the stage 1 and stage 2 page table addresses are `usize`. It looks like that //! assumption might not be holding so we need to check that everything is going to be okay. -//! +//! //! TODO(HfO2): Many functions return Option<()> to represent success or fail. //! Change them to return Result<(), ()> (#34.) @@ -120,6 +120,7 @@ bitflags! { /// - !V !O !X : Invalid memory. Memory is unrelated to the VM. /// /// Modes are selected so that owner of exclusive memory is the default. + #[repr(C)] pub struct Mode: u32 { /// Read const R = 0b00000001; @@ -163,10 +164,10 @@ bitflags! { } /// The type of addresses stored in the page table. +#[allow(non_camel_case_types)] type ptable_addr_t = uintvaddr_t; -/// For stage 2, the input is an intermediate physical addresses rather than a -/// virtual address so: +// For stage 2, the input is an intermediate physical addresses rather than a virtual address so: const_assert_eq!(addr_size_eq; mem::size_of::(), mem::size_of::()); /// The hypervisor page table. diff --git a/hfo2/src/slist.rs b/hfo2/src/slist.rs index d576cf91a..0b9ae9cbd 100644 --- a/hfo2/src/slist.rs +++ b/hfo2/src/slist.rs @@ -20,6 +20,7 @@ use core::ptr; /// An entry in an intrusive linked list. #[derive(Debug)] +#[repr(C)] pub struct ListEntry { /// The next entry in the linked list. next: Cell<*const ListEntry>, @@ -86,6 +87,7 @@ pub trait IsElement { /// A lock-free, intrusive linked list of type `T`. #[derive(Debug)] +#[repr(C)] pub struct List = T> { /// The head of the linked list. pub(crate) head: ListEntry, diff --git a/hfo2/src/spci.rs b/hfo2/src/spci.rs index e47c9b5eb..becedad1b 100644 --- a/hfo2/src/spci.rs +++ b/hfo2/src/spci.rs @@ -51,6 +51,7 @@ pub enum SpciMemoryShare { bitflags! { /// For SpciMessage::flags /// flags[15:1] reserved(MBZ). + #[repr(C)] pub struct SpciMessageFlags: u16 { /// Architected message payload. const ARCHITECTED = 0b0000; diff --git a/hfo2/src/spinlock.rs b/hfo2/src/spinlock.rs index 7c62f8284..b7ddbda9b 100644 --- a/hfo2/src/spinlock.rs +++ b/hfo2/src/spinlock.rs @@ -16,6 +16,7 @@ use core::cell::UnsafeCell; use core::marker::PhantomData; +use core::mem; use core::ops::{Deref, DerefMut}; use core::ptr; use core::sync::atomic::{spin_loop_hint, AtomicBool, Ordering}; @@ -86,6 +87,21 @@ impl SpinLock { } } + pub fn try_lock<'s>(&'s self) -> Option> { + if self.lock.try_lock() { + Some(SpinLockGuard { + lock: self, + _marker: PhantomData, + }) + } else { + None + } + } + + pub unsafe fn unlock_unchecked(&self) { + self.lock.unlock(); + } + pub unsafe fn get_unchecked(&self) -> &T { &*self.data.get() } @@ -146,7 +162,9 @@ impl<'s, T> DerefMut for SpinLockGuard<'s, T> { impl<'s, T> SpinLockGuard<'s, T> { pub fn into_raw(self) -> usize { - self.lock as *const _ as usize + let ret = self.lock as *const _ as usize; + mem::forget(self); + ret } pub unsafe fn from_raw(data: usize) -> Self { diff --git a/hfo2/src/vm.rs b/hfo2/src/vm.rs index 903940f91..752539099 100644 --- a/hfo2/src/vm.rs +++ b/hfo2/src/vm.rs @@ -25,7 +25,6 @@ use arrayvec::ArrayVec; use crate::addr::*; use crate::arch::*; use crate::cpu::*; -use crate::dlog::*; use crate::list::*; use crate::mm::*; use crate::mpool::*; @@ -153,7 +152,9 @@ impl Mailbox { let mut hypervisor_ptable = HYPERVISOR_PAGE_TABLE.lock(); // Map the send page as read-only in the hypervisor address space. 
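[Editorial note, not part of the patch] The `mem::forget` added to `SpinLockGuard::into_raw` above is what makes the raw round trip sound: previously the guard value was consumed and its destructor ran inside `into_raw`, releasing the lock even though the returned token claimed it was still held. A hedged usage sketch of the intended pairing:

// Park a held lock as a plain usize, e.g. to hand it across the C boundary.
fn park_lock<T>(lock: &SpinLock<T>) -> usize {
    // The lock stays held; only the guard value is consumed.
    lock.lock().into_raw()
}

// Later, rebuild the guard from the token. Dropping it releases the lock
// exactly once. Unsafe because the token must come from `into_raw` and the
// lock must still be logically owned by this caller.
unsafe fn resume_lock<'s, T>(token: usize) -> SpinLockGuard<'s, T> {
    SpinLockGuard::from_raw(token)
}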
- if hypervisor_ptable.identity_map(pa_send_begin, pa_send_end, Mode::R, local_page_pool).is_some() + if hypervisor_ptable + .identity_map(pa_send_begin, pa_send_end, Mode::R, local_page_pool) + .is_some() { self.send = pa_addr(pa_send_begin) as usize as *const SpciMessage; } else { @@ -165,7 +166,9 @@ impl Mailbox { // Map the receive page as writable in the hypervisor address space. On // failure, unmap the send page before returning. - if hypervisor_ptable.identity_map(pa_recv_begin, pa_recv_end, Mode::W, local_page_pool).is_some() + if hypervisor_ptable + .identity_map(pa_recv_begin, pa_recv_end, Mode::W, local_page_pool) + .is_some() { self.recv = pa_addr(pa_recv_begin) as usize as *mut SpciMessage; } else { @@ -269,60 +272,55 @@ impl VmInner { let local_page_pool: MPool = MPool::new_with_fallback(fallback_mpool); // Take memory ownership away from the VM and mark as shared. - self.ptable.identity_map( - pa_send_begin, - pa_send_end, - Mode::UNOWNED | Mode::SHARED | Mode::R | Mode::W, - &local_page_pool, - ).ok_or(())?; - - if self.ptable.identity_map( - pa_recv_begin, - pa_recv_end, - Mode::UNOWNED | Mode::SHARED | Mode::R, - &local_page_pool, - ).is_none() { + self.ptable + .identity_map( + pa_send_begin, + pa_send_end, + Mode::UNOWNED | Mode::SHARED | Mode::R | Mode::W, + &local_page_pool, + ) + .ok_or(())?; + + if self + .ptable + .identity_map( + pa_recv_begin, + pa_recv_end, + Mode::UNOWNED | Mode::SHARED | Mode::R, + &local_page_pool, + ) + .is_none() + { // TODO: partial defrag of failed range. // Recover any memory consumed in failed mapping. self.ptable.defrag(&local_page_pool); assert!(self .ptable - .identity_map( - pa_send_begin, - pa_send_end, - orig_send_mode, - &local_page_pool - ) + .identity_map(pa_send_begin, pa_send_end, orig_send_mode, &local_page_pool) .is_some()); return Err(()); } - if self.mailbox.configure_stage1( - pa_send_begin, - pa_send_end, - pa_recv_begin, - pa_recv_end, - &local_page_pool, - ).is_err() { + if self + .mailbox + .configure_stage1( + pa_send_begin, + pa_send_end, + pa_recv_begin, + pa_recv_end, + &local_page_pool, + ) + .is_err() + { assert!(self .ptable - .identity_map( - pa_recv_begin, - pa_recv_end, - orig_recv_mode, - &local_page_pool - ) + .identity_map(pa_recv_begin, pa_recv_end, orig_recv_mode, &local_page_pool) .is_some()); assert!(self .ptable - .identity_map( - pa_send_begin, - pa_send_end, - orig_send_mode, - &local_page_pool - ) + .identity_map(pa_send_begin, pa_send_end, orig_send_mode, &local_page_pool) .is_some()); return Err(()); @@ -372,13 +370,15 @@ impl VmInner { .get_mode(send, ipa_add(send, PAGE_SIZE)) .filter(|mode| mode.valid_owned_and_exclusive()) .filter(|mode| mode.contains(Mode::R)) - .filter(|mode| mode.contains(Mode::W)).ok_or(())?; + .filter(|mode| mode.contains(Mode::W)) + .ok_or(())?; let orig_recv_mode = self .ptable .get_mode(recv, ipa_add(recv, PAGE_SIZE)) .filter(|mode| mode.valid_owned_and_exclusive()) - .filter(|mode| mode.contains(Mode::R)).ok_or(())?; + .filter(|mode| mode.contains(Mode::R)) + .ok_or(())?; self.configure_pages( pa_send_begin, @@ -517,8 +517,8 @@ pub struct TwoVmLocked { pub vm2: VmLocked, } -static mut vms: MaybeUninit<[Vm; MAX_VMS]> = MaybeUninit::uninit(); -static mut vm_count: spci_vm_count_t = 0; +static mut VMS: MaybeUninit<[Vm; MAX_VMS]> = MaybeUninit::uninit(); +static mut VM_COUNT: spci_vm_count_t = 0; #[no_mangle] pub unsafe extern "C" fn vm_init( @@ -526,14 +526,13 @@ pub unsafe extern "C" fn vm_init( ppool: *mut MPool, new_vm: *mut *mut Vm, ) -> bool { - let i: i32; let 
vm: *mut Vm; - if vm_count as usize >= MAX_VMS { + if VM_COUNT as usize >= MAX_VMS { return false; } - vm = &mut vms.get_mut()[vm_count as usize]; + vm = &mut VMS.get_mut()[VM_COUNT as usize]; memset_s( vm as usize as _, @@ -542,17 +541,20 @@ pub unsafe extern "C" fn vm_init( mem::size_of::(), ); - (*vm).id = vm_count; + (*vm).id = VM_COUNT; (*vm).vcpu_count = vcpu_count; (*vm).aborting = AtomicBool::new(false); - (*vm).inner.get_mut_unchecked().init(vm, &mut *ppool); + let result = (*vm).inner.get_mut_unchecked().init(vm, &mut *ppool); + if result.is_err() { + return false; + } // Do basic initialization of vcpus. for i in 0..vcpu_count { vcpu_init(vm_get_vcpu(vm, i), vm); } - vm_count += 1; + VM_COUNT += 1; *new_vm = vm; true @@ -560,27 +562,24 @@ pub unsafe extern "C" fn vm_init( #[no_mangle] pub unsafe extern "C" fn vm_get_count() -> spci_vm_count_t { - vm_count + VM_COUNT } #[no_mangle] pub unsafe extern "C" fn vm_find(id: spci_vm_id_t) -> *mut Vm { // Ensure the VM is initialized. - if id >= vm_count { + if id >= VM_COUNT { return ptr::null_mut(); } - &mut vms.get_mut()[id as usize] + &mut VMS.get_mut()[id as usize] } /// Locks the given VM and updates `locked` to hold the newly locked vm. #[no_mangle] pub unsafe extern "C" fn vm_lock(vm: *mut Vm) -> VmLocked { - let locked = VmLocked { vm }; - - (*vm).inner.lock().into_raw(); - - locked + mem::forget((*vm).inner.lock()); + VmLocked { vm } } /// Locks two VMs ensuring that the locking order is according to the locks' @@ -601,9 +600,7 @@ pub unsafe extern "C" fn vm_lock_both(vm1: *mut Vm, vm2: *mut Vm) -> TwoVmLocked /// the fact that the VM is no longer locked. #[no_mangle] pub unsafe extern "C" fn vm_unlock(locked: *mut VmLocked) { - let guard = - SpinLockGuard::<'static, VmInner>::from_raw(&(*(*locked).vm).inner as *const _ as usize); - mem::drop(guard); + (*(*locked).vm).inner.unlock_unchecked(); (*locked).vm = ptr::null_mut(); } diff --git a/rust-toolchain b/rust-toolchain index 0a417a749..5e0fdfa1a 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2019-06-17 +nightly-2019-08-28
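[Editorial note, not part of the patch] For C callers of `vm_lock`/`vm_unlock`, the contract is unchanged by the rewrite: `vm_lock` takes the inner lock and forgets its guard, so the lock stays held behind the returned `VmLocked`, and `vm_unlock` releases it via `unlock_unchecked` and nulls the handle. A hedged sketch of that usage, expressed in Rust:

unsafe fn with_vm_locked(vm: *mut Vm) {
    // Acquire: the inner lock is taken and its guard forgotten, so the lock
    // stays held while only the plain `VmLocked` handle is carried around.
    let mut locked = vm_lock(vm);

    // ... work with the VM's mailbox / page table under the lock ...

    // Release: unlocks exactly once and nulls out the handle.
    vm_unlock(&mut locked);
}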