core: consolidate arch_suspend_cpu / arch_resume_cpu

No need to duplicate code, we now have the same path on all
architectures.

Additionally, suspend_cpu() is only called in hypervisor/control.c.
Restrict its visibility and make it static.

Signed-off-by: Ralf Ramsauer <ralf.ramsauer@oth-regensburg.de>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
rralf authored and jan-kiszka committed Aug 14, 2018
1 parent ff306a0 commit 1dbfecb2bf427872938b3b2530a6d20f4669b630
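The diff below touches the arm-common and x86 copies of this code, hypervisor/control.c, and include/jailhouse/control.h. As a rough orientation, the interface that results from the consolidation can be sketched as follows; this is a summary derived from the hunks below, with abbreviated comments, not a verbatim copy of the sources:

/* hypervisor/control.c -- one suspend/resume path for all architectures */
static void suspend_cpu(unsigned int cpu_id);  /* static: its callers live only in control.c */
void resume_cpu(unsigned int cpu_id);          /* exported via include/jailhouse/control.h */

/*
 * arch/arm-common and arch/x86 keep only the architecture-specific part:
 * arch_reset_cpu() and arch_park_cpu() set their per-architecture request
 * flag (reset/park on ARM, sipi_vector/init_signaled on x86) and then call
 * the common resume_cpu() instead of a per-architecture arch_resume_cpu().
 */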
@@ -120,9 +120,9 @@ The main reason behind Jailhouse trapping ICR (and few other registers) access i
Now let's turn to interrupt handling. In vmcs_setup(), Jailhouse skips enabling external-interrupt exiting and sets exception bitmaps to all-zeroes. This means the only interrupt that results in a VM exit is the Non-Maskable Interrupt (NMI). Everything else is dispatched through the guest IDT and handled in guest mode. Since cells assert full control over their own resources, this makes sense.
Currently, NMIs can only come from the hypervisor itself (see the TODO in the apic_deliver_ipi() function), which uses them to control CPUs (arch_suspend_cpu() is an example). When an NMI occurs in a VM, the VM exits and Jailhouse re-throws the NMI in host mode. The CPU dispatches it through the host IDT and jumps to apic_nmi_handler(). It schedules another VM exit using a VMX feature known as the preemption timer. vmcs_setup() sets this timer to zero, so if it is enabled, a VM exit occurs immediately after VM entry. The reason behind this indirection is serialization: this way, NMIs (which are asynchronous by nature) are always delivered after guest entries.
Currently, NMIs can only come from the hypervisor itself (see the TODO in the apic_deliver_ipi() function), which uses them to control CPUs (suspend_cpu() is an example). When an NMI occurs in a VM, the VM exits and Jailhouse re-throws the NMI in host mode. The CPU dispatches it through the host IDT and jumps to apic_nmi_handler(). It schedules another VM exit using a VMX feature known as the preemption timer. vmcs_setup() sets this timer to zero, so if it is enabled, a VM exit occurs immediately after VM entry. The reason behind this indirection is serialization: this way, NMIs (which are asynchronous by nature) are always delivered after guest entries.
At this next exit, vmx_handle_exit() calls apic_handle_events(), which spins in a loop waiting for cpu_data->wait_for_sipi or cpu->stop_cpu to become false. arch_suspend_cpu() sets the cpu->stop_cpu flag and arch_resume_cpu() clears it, and the apic_deliver_ipi() function resets cpu_data->wait_for_sipi for a target CPU when delivering a Startup IPI (SIPI). It also stores the vector the SIPI contains in cpu_data->sipi_vector for later use (see "Creating a cell"). For Jailhouse-initiated CPU resets, arch_reset_cpu() sets sipi_vector to APIC_BSP_PSEUDO_SIPI (0x100; real SIPI vectors never exceed 0xff).
At this next exit, vmx_handle_exit() calls apic_handle_events(), which spins in a loop waiting for cpu_data->wait_for_sipi or cpu->stop_cpu to become false. suspend_cpu() sets the cpu->stop_cpu flag and resume_cpu() clears it, and the apic_deliver_ipi() function resets cpu_data->wait_for_sipi for a target CPU when delivering a Startup IPI (SIPI). It also stores the vector the SIPI contains in cpu_data->sipi_vector for later use (see "Creating a cell"). For Jailhouse-initiated CPU resets, arch_reset_cpu() sets sipi_vector to APIC_BSP_PSEUDO_SIPI (0x100; real SIPI vectors never exceed 0xff).
Any other interrupt or exception in host mode is considered a serious fault and results in a panic.
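To make this handshake concrete, here is a minimal sketch of the target CPU's side of it, modeled on the suspend_cpu()/resume_cpu() pair introduced by this commit and on the event handling described above. The function name handle_suspend_request() and the trimmed-down per-CPU struct are illustrative, not actual Jailhouse code; the field names control_lock, suspend_cpu and cpu_suspended are taken from the hunks below, and the spinlock and cpu_relax() primitives are assumed to be the hypervisor's own:

struct per_cpu_state {
        spinlock_t control_lock;        /* protects the two flags below */
        bool suspend_cpu;               /* request: set by suspend_cpu(), cleared by resume_cpu() */
        bool cpu_suspended;             /* acknowledgment the initiator busy-waits on */
};

/* Hypothetical handler run by the target CPU after the NMI-triggered exit. */
static void handle_suspend_request(struct per_cpu_state *cpu)
{
        spin_lock(&cpu->control_lock);
        while (cpu->suspend_cpu) {
                /* Acknowledge the request; suspend_cpu() spins until this is true. */
                cpu->cpu_suspended = true;
                spin_unlock(&cpu->control_lock);
                cpu_relax();            /* suspension periods must stay short */
                spin_lock(&cpu->control_lock);
        }
        cpu->cpu_suspended = false;
        spin_unlock(&cpu->control_lock);
}

Once resume_cpu() clears suspend_cpu under control_lock, the loop falls through, the target withdraws its cpu_suspended acknowledgment and can return to the guest.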
@@ -46,56 +46,18 @@ void arm_cpu_kick(unsigned int cpu_id)
        irqchip_send_sgi(&sgi);
}

void arch_suspend_cpu(unsigned int cpu_id)
{
        struct public_per_cpu *target_data = public_per_cpu(cpu_id);
        bool target_suspended;

        spin_lock(&target_data->control_lock);
        target_data->suspend_cpu = true;
        target_suspended = target_data->cpu_suspended;
        spin_unlock(&target_data->control_lock);

        if (!target_suspended) {
                /*
                 * Send a maintenance signal (SGI_EVENT) to the target CPU.
                 * Then, wait for the target CPU to enter the suspended state.
                 * The target CPU, in turn, will leave the guest and handle the
                 * request in the event loop.
                 */
                arch_send_event(target_data);
                while (!target_data->cpu_suspended)
                        cpu_relax();
        }
}

void arch_resume_cpu(unsigned int cpu_id)
{
        struct public_per_cpu *target_data = public_per_cpu(cpu_id);

        /* take lock to avoid theoretical race with a pending suspension */
        spin_lock(&target_data->control_lock);
        target_data->suspend_cpu = false;
        spin_unlock(&target_data->control_lock);
}

void arch_reset_cpu(unsigned int cpu_id)
{
        public_per_cpu(cpu_id)->reset = true;
        arch_resume_cpu(cpu_id);
        resume_cpu(cpu_id);
}

void arch_park_cpu(unsigned int cpu_id)
{
        public_per_cpu(cpu_id)->park = true;
        arch_resume_cpu(cpu_id);
        resume_cpu(cpu_id);
}
static void check_events(struct public_per_cpu *cpu_public)
@@ -119,56 +119,18 @@ void arch_prepare_shutdown(void)
        iommu_prepare_shutdown();
}

void arch_suspend_cpu(unsigned int cpu_id)
{
        struct public_per_cpu *target_data = public_per_cpu(cpu_id);
        bool target_suspended;

        spin_lock(&target_data->control_lock);
        target_data->suspend_cpu = true;
        target_suspended = target_data->cpu_suspended;
        spin_unlock(&target_data->control_lock);

        if (!target_suspended) {
                /*
                 * Send a maintenance signal (NMI) to the target CPU.
                 * Then, wait for the target CPU to enter the suspended state.
                 * The target CPU, in turn, will leave the guest and handle the
                 * request in the event loop.
                 */
                arch_send_event(target_data);
                while (!target_data->cpu_suspended)
                        cpu_relax();
        }
}

void arch_resume_cpu(unsigned int cpu_id)
{
        struct public_per_cpu *target_data = public_per_cpu(cpu_id);

        /* take lock to avoid theoretical race with a pending suspension */
        spin_lock(&target_data->control_lock);
        target_data->suspend_cpu = false;
        spin_unlock(&target_data->control_lock);
}

void arch_reset_cpu(unsigned int cpu_id)
{
        public_per_cpu(cpu_id)->sipi_vector = APIC_BSP_PSEUDO_SIPI;
        arch_resume_cpu(cpu_id);
        resume_cpu(cpu_id);
}

void arch_park_cpu(unsigned int cpu_id)
{
        public_per_cpu(cpu_id)->init_signaled = true;
        arch_resume_cpu(cpu_id);
        resume_cpu(cpu_id);
}
void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
@@ -20,6 +20,7 @@
#include <jailhouse/unit.h>
#include <jailhouse/utils.h>
#include <asm/bitops.h>
#include <asm/control.h>
#include <asm/spinlock.h>
enum msg_type {MSG_REQUEST, MSG_INFORMATION};
@@ -73,6 +74,63 @@ bool cpu_id_valid(unsigned long cpu_id)
test_bit(cpu_id, system_cpu_set));
}
/**
 * Suspend a remote CPU.
 * @param cpu_id ID of the target CPU.
 *
 * Suspension means that the target CPU is no longer executing cell code or
 * arbitrary hypervisor code. It may actively busy-wait in the hypervisor
 * context, so the suspension time should be kept short.
 *
 * The function waits for the target CPU to enter suspended state.
 *
 * This service can be used to synchronize with other CPUs before performing
 * management tasks.
 *
 * @note This function must not be invoked for the caller's CPU.
 *
 * @see resume_cpu
 * @see arch_reset_cpu
 * @see arch_park_cpu
 */
static void suspend_cpu(unsigned int cpu_id)
{
        struct public_per_cpu *target_data = public_per_cpu(cpu_id);
        bool target_suspended;

        spin_lock(&target_data->control_lock);
        target_data->suspend_cpu = true;
        target_suspended = target_data->cpu_suspended;
        spin_unlock(&target_data->control_lock);

        if (!target_suspended) {
                /*
                 * Send a maintenance signal to the target CPU.
                 * Then, wait for the target CPU to enter the suspended state.
                 * The target CPU, in turn, will leave the guest and handle the
                 * request in the event loop.
                 */
                arch_send_event(target_data);
                while (!target_data->cpu_suspended)
                        cpu_relax();
        }
}

void resume_cpu(unsigned int cpu_id)
{
        struct public_per_cpu *target_data = public_per_cpu(cpu_id);

        /* take lock to avoid theoretical race with a pending suspension */
        spin_lock(&target_data->control_lock);
        target_data->suspend_cpu = false;
        spin_unlock(&target_data->control_lock);
}

/*
 * Suspend all CPUs assigned to the cell except the one executing
 * the function (if it is in the cell's CPU set) to prevent races.
@@ -82,15 +140,15 @@ static void cell_suspend(struct cell *cell)
        unsigned int cpu;

        for_each_cpu_except(cpu, cell->cpu_set, this_cpu_id())
                arch_suspend_cpu(cpu);
                suspend_cpu(cpu);
}

static void cell_resume(struct cell *cell)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cell->cpu_set, this_cpu_id())
                arch_resume_cpu(cpu);
                resume_cpu(cpu);
}
/**
@@ -130,36 +130,15 @@ void shutdown(void);
void __attribute__((noreturn)) panic_stop(void);
void panic_park(void);
/**
 * Suspend a remote CPU.
 * @param cpu_id ID of the target CPU.
 *
 * Suspension means that the target CPU is no longer executing cell code or
 * arbitrary hypervisor code. It may actively busy-wait in the hypervisor
 * context, so the suspension time should be kept short.
 *
 * The function waits for the target CPU to enter suspended state.
 *
 * This service can be used to synchronize with other CPUs before performing
 * management tasks.
 *
 * @note This function must not be invoked for the caller's CPU.
 *
 * @see arch_resume_cpu
 * @see arch_reset_cpu
 * @see arch_park_cpu
 */
void arch_suspend_cpu(unsigned int cpu_id);

/**
 * Resume a suspended remote CPU.
 * @param cpu_id ID of the target CPU.
 *
 * @note This function must not be invoked for the caller's CPU.
 *
 * @see arch_suspend_cpu
 * @see suspend_cpu
 */
void arch_resume_cpu(unsigned int cpu_id);
void resume_cpu(unsigned int cpu_id);
/**
 * Reset a suspended remote CPU and resume its execution.
@@ -171,7 +150,7 @@ void arch_resume_cpu(unsigned int cpu_id);
 * @note This function must not be invoked for the caller's CPU or if the
 * target CPU is not in suspend state.
 *
 * @see arch_suspend_cpu
 * @see suspend_cpu
 */
void arch_reset_cpu(unsigned int cpu_id);
@@ -190,7 +169,7 @@ void arch_reset_cpu(unsigned int cpu_id);
 * @note This function must not be invoked for the caller's CPU or if the
 * target CPU is not in suspend state.
 *
 * @see arch_suspend_cpu
 * @see suspend_cpu
 */
void arch_park_cpu(unsigned int cpu_id);
