Skip to content

Commit

Permalink
*_run_on_cpu: introduce run_on_cpu_data type
Browse files Browse the repository at this point in the history
This changes the *_run_on_cpu APIs (and helpers) to pass data in a
run_on_cpu_data type instead of a plain void *. This is because we
sometimes want to pass a target address (target_ulong), which does not
fit in a void * on 32-bit hosts emulating 64-bit guests.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20161027151030.20863-24-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
  • Loading branch information
bonzini committed Oct 31, 2016
1 parent 12e9700 commit 14e6fe1
Show file tree
Hide file tree
Showing 16 changed files with 95 additions and 75 deletions.
9 changes: 5 additions & 4 deletions cpus-common.c
Expand Up @@ -109,7 +109,7 @@ void cpu_list_remove(CPUState *cpu)
struct qemu_work_item {
struct qemu_work_item *next;
run_on_cpu_func func;
void *data;
run_on_cpu_data data;
bool free, exclusive, done;
};

Expand All @@ -129,7 +129,7 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
QemuMutex *mutex)
{
struct qemu_work_item wi;
Expand All @@ -154,7 +154,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
}
}

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
struct qemu_work_item *wi;

Expand Down Expand Up @@ -296,7 +296,8 @@ void cpu_exec_end(CPUState *cpu)
}
}

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
run_on_cpu_data data)
{
struct qemu_work_item *wi;

Expand Down
7 changes: 4 additions & 3 deletions cpus.c
Expand Up @@ -556,7 +556,7 @@ static const VMStateDescription vmstate_timers = {
}
};

static void cpu_throttle_thread(CPUState *cpu, void *opaque)
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
double pct;
double throttle_ratio;
Expand Down Expand Up @@ -587,7 +587,8 @@ static void cpu_throttle_timer_tick(void *opaque)
}
CPU_FOREACH(cpu) {
if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
async_run_on_cpu(cpu, cpu_throttle_thread,
RUN_ON_CPU_NULL);
}
}

Expand Down Expand Up @@ -914,7 +915,7 @@ void qemu_init_cpu_loop(void)
qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}
Expand Down
14 changes: 7 additions & 7 deletions hw/i386/kvm/apic.c
Expand Up @@ -133,9 +133,9 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
}
}

static void kvm_apic_put(CPUState *cs, void *data)
static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
{
APICCommonState *s = data;
APICCommonState *s = data.host_ptr;
struct kvm_lapic_state kapic;
int ret;

Expand All @@ -151,12 +151,12 @@ static void kvm_apic_put(CPUState *cs, void *data)

static void kvm_apic_post_load(APICCommonState *s)
{
run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}

static void do_inject_external_nmi(CPUState *cpu, void *data)
static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data)
{
APICCommonState *s = data;
APICCommonState *s = data.host_ptr;
uint32_t lvt;
int ret;

Expand All @@ -174,7 +174,7 @@ static void do_inject_external_nmi(CPUState *cpu, void *data)

static void kvm_apic_external_nmi(APICCommonState *s)
{
run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s));
}

static void kvm_send_msi(MSIMessage *msg)
Expand Down Expand Up @@ -213,7 +213,7 @@ static void kvm_apic_reset(APICCommonState *s)
/* Not used by KVM, which uses the CPU mp_state instead. */
s->wait_for_sipi = 0;

run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}

static void kvm_apic_realize(DeviceState *dev, Error **errp)
Expand Down
13 changes: 6 additions & 7 deletions hw/i386/kvmvapic.c
Expand Up @@ -487,10 +487,9 @@ typedef struct VAPICEnableTPRReporting {
bool enable;
} VAPICEnableTPRReporting;

static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data)
{
VAPICEnableTPRReporting *info = data;

VAPICEnableTPRReporting *info = data.host_ptr;
apic_enable_tpr_access_reporting(info->apic, info->enable);
}

Expand All @@ -505,7 +504,7 @@ static void vapic_enable_tpr_reporting(bool enable)
CPU_FOREACH(cs) {
cpu = X86_CPU(cs);
info.apic = cpu->apic_state;
run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info));
}
}

Expand Down Expand Up @@ -738,9 +737,9 @@ static void vapic_realize(DeviceState *dev, Error **errp)
nb_option_roms++;
}

static void do_vapic_enable(CPUState *cs, void *data)
static void do_vapic_enable(CPUState *cs, run_on_cpu_data data)
{
VAPICROMState *s = data;
VAPICROMState *s = data.host_ptr;
X86CPU *cpu = X86_CPU(cs);

static const uint8_t enabled = 1;
Expand All @@ -762,7 +761,7 @@ static void kvmvapic_vm_state_change(void *opaque, int running,

if (s->state == VAPIC_ACTIVE) {
if (smp_cpus == 1) {
run_on_cpu(first_cpu, do_vapic_enable, s);
run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s));
} else {
zero = g_malloc0(s->rom_state.vapic_size);
cpu_physical_memory_write(s->vapic_paddr, zero,
Expand Down
6 changes: 3 additions & 3 deletions hw/ppc/ppce500_spin.c
Expand Up @@ -84,11 +84,11 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
env->tlb_dirty = true;
}

static void spin_kick(CPUState *cs, void *data)
static void spin_kick(CPUState *cs, run_on_cpu_data data)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
SpinInfo *curspin = data;
SpinInfo *curspin = data.host_ptr;
hwaddr map_size = 64 * 1024 * 1024;
hwaddr map_start;

Expand Down Expand Up @@ -147,7 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,

if (!(ldq_p(&curspin->addr) & 1)) {
/* run CPU */
run_on_cpu(cpu, spin_kick, curspin);
run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin));
}
}

Expand Down
4 changes: 2 additions & 2 deletions hw/ppc/spapr.c
Expand Up @@ -2148,7 +2148,7 @@ static void spapr_machine_finalizefn(Object *obj)
g_free(spapr->kvm_type);
}

static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg)
static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
cpu_synchronize_state(cs);
ppc_cpu_do_system_reset(cs);
Expand All @@ -2159,7 +2159,7 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
CPUState *cs;

CPU_FOREACH(cs) {
async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL);
async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
}
}

Expand Down
12 changes: 6 additions & 6 deletions hw/ppc/spapr_hcall.c
Expand Up @@ -18,9 +18,9 @@ struct SPRSyncState {
target_ulong mask;
};

static void do_spr_sync(CPUState *cs, void *arg)
static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
{
struct SPRSyncState *s = arg;
struct SPRSyncState *s = arg.host_ptr;
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;

Expand All @@ -37,7 +37,7 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
.value = value,
.mask = mask
};
run_on_cpu(cs, do_spr_sync, &s);
run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
}

static bool has_spr(PowerPCCPU *cpu, int spr)
Expand Down Expand Up @@ -911,10 +911,10 @@ typedef struct {
Error *err;
} SetCompatState;

static void do_set_compat(CPUState *cs, void *arg)
static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
SetCompatState *s = arg;
SetCompatState *s = arg.host_ptr;

cpu_synchronize_state(cs);
ppc_set_compat(cpu, s->cpu_version, &s->err);
Expand Down Expand Up @@ -1017,7 +1017,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
.err = NULL,
};

run_on_cpu(cs, do_set_compat, &s);
run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));

if (s.err) {
error_report_err(s.err);
Expand Down
28 changes: 23 additions & 5 deletions include/qom/cpu.h
Expand Up @@ -231,7 +231,25 @@ struct kvm_run;
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* work queue */
typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);

/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter
 */
typedef union {
int host_int;        /* a plain host-side int payload */
unsigned long host_ulong;        /* a host-sized unsigned integer payload */
void *host_ptr;        /* a pointer into host memory (replaces the old void * argument) */
vaddr target_ptr;        /* a guest virtual address; wide enough for a 64 bit
                          * target even when the host is 32 bit */
} run_on_cpu_data;

/* Convenience constructors: build a run_on_cpu_data from each member kind. */
#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
/* "No payload" marker, used where the old API passed a NULL void *. */
#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)

/* Callback invoked on a vCPU by the run_on_cpu family of functions. */
typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);

struct qemu_work_item;

/**
Expand Down Expand Up @@ -637,7 +655,7 @@ bool cpu_is_stopped(CPUState *cpu);
*
* Used internally in the implementation of run_on_cpu.
*/
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
QemuMutex *mutex);

/**
Expand All @@ -648,7 +666,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
*
* Schedules the function @func for execution on the vCPU @cpu.
*/
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
* async_run_on_cpu:
Expand All @@ -658,7 +676,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
*
* Schedules the function @func for execution on the vCPU @cpu asynchronously.
*/
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
* async_safe_run_on_cpu:
Expand All @@ -672,7 +690,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
* Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
* BQL.
*/
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
* qemu_get_cpu:
Expand Down
20 changes: 11 additions & 9 deletions kvm-all.c
Expand Up @@ -1856,7 +1856,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->kvm_vcpu_dirty) {
kvm_arch_get_registers(cpu);
Expand All @@ -1867,30 +1867,30 @@ static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
void kvm_cpu_synchronize_state(CPUState *cpu)
{
if (!cpu->kvm_vcpu_dirty) {
run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL);
run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL);
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL);
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

int kvm_cpu_exec(CPUState *cpu)
Expand Down Expand Up @@ -2218,9 +2218,10 @@ struct kvm_set_guest_debug_data {
int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, void *data)
static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
struct kvm_set_guest_debug_data *dbg_data = data;
struct kvm_set_guest_debug_data *dbg_data =
(struct kvm_set_guest_debug_data *) data.host_ptr;

dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
&dbg_data->dbg);
Expand All @@ -2237,7 +2238,8 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
}
kvm_arch_update_guest_debug(cpu, &data.dbg);

run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
run_on_cpu(cpu, kvm_invoke_set_guest_debug,
RUN_ON_CPU_HOST_PTR(&data));
return data.err;
}

Expand Down
8 changes: 4 additions & 4 deletions target-i386/helper.c
Expand Up @@ -1121,9 +1121,9 @@ typedef struct MCEInjectionParams {
int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(CPUState *cs, void *data)
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
MCEInjectionParams *params = data;
MCEInjectionParams *params = data.host_ptr;
X86CPU *cpu = X86_CPU(cs);
CPUX86State *cenv = &cpu->env;
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
Expand Down Expand Up @@ -1230,7 +1230,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
return;
}

run_on_cpu(cs, do_inject_x86_mce, &params);
run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
if (flags & MCE_INJECT_BROADCAST) {
CPUState *other_cs;

Expand All @@ -1243,7 +1243,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
if (other_cs == cs) {
continue;
}
run_on_cpu(other_cs, do_inject_x86_mce, &params);
run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
}
}
}
Expand Down

0 comments on commit 14e6fe1

Please sign in to comment.