target-ppc: spapr: e500: fix to use cpu_dt_id
This makes use of @cpu_dt_id and the related API in (a short sketch of the helpers involved follows the list):
1. the emulated XICS hypercall handlers, as they receive CPU indexes fixed in the device tree;
2. XICS-KVM, to enable the in-kernel XICS on the right CPU;
3. the device-tree renderer.
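
For context, the helpers this relies on, ppc_get_vcpu_dt_id() and ppc_get_vcpu_by_dt_id(), map between a PowerPCCPU and its device-tree id. Their definitions are not part of this page; the sketch below is an illustrative reconstruction inferred from how they are called in the hunks that follow, not the authoritative implementation:

    /* Illustrative sketch only -- the real definitions live in target-ppc
     * and may differ in detail. */
    int ppc_get_vcpu_dt_id(PowerPCCPU *cpu)
    {
        return cpu->cpu_dt_id;
    }

    PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id)
    {
        CPUState *cs;

        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);

            if (cpu->cpu_dt_id == cpu_dt_id) {
                return cpu;
            }
        }

        return NULL;
    }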

This removes the @cpu_index fixup, as @cpu_dt_id is used instead, so the QEMU monitor
can accept command-line CPU indexes again.

This changes kvm_arch_vcpu_id() to use ppc_get_vcpu_dt_id(), since at the moment
the KVM CPU id and the device-tree id are calculated using the same algorithm.
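
Why kvm_arch_vcpu_id() matters here: the generic KVM layer hands its return value to the kernel when a vCPU is created, so with this change the in-kernel vcpu id on target-ppc matches the device-tree id. A paraphrased sketch of that consumer path (simplified from memory, function name made up for illustration, not part of this commit):

    /* Paraphrased sketch of the generic vCPU creation path in kvm-all.c;
     * simplified and renamed for illustration -- not part of this commit. */
    static int kvm_init_vcpu_sketch(CPUState *cs)
    {
        int ret;

        /* Whatever kvm_arch_vcpu_id() returns becomes the in-kernel vcpu id;
         * after this patch, on target-ppc, that is the device-tree id. */
        ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU,
                           (void *)(uintptr_t)kvm_arch_vcpu_id(cs));
        if (ret < 0) {
            return ret;
        }
        cs->kvm_fd = ret;
        return 0;
    }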

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Acked-by: Mike Day <ncmike@ncultra.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
aik authored and agraf committed Mar 5, 2014
1 parent 0ce470c commit 0f20ba6
Showing 9 changed files with 41 additions and 25 deletions.
2 changes: 1 addition & 1 deletion hw/intc/openpic_kvm.c
@@ -228,7 +228,7 @@ int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
 
     encap.cap = KVM_CAP_IRQ_MPIC;
     encap.args[0] = opp->fd;
-    encap.args[1] = cs->cpu_index;
+    encap.args[1] = kvm_arch_vcpu_id(cs);
 
     return kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
 }
15 changes: 13 additions & 2 deletions hw/intc/xics.c
@@ -33,6 +33,17 @@
 #include "qemu/error-report.h"
 #include "qapi/visitor.h"
 
+static int get_cpu_index_by_dt_id(int cpu_dt_id)
+{
+    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);
+
+    if (cpu) {
+        return cpu->parent_obj.cpu_index;
+    }
+
+    return -1;
+}
+
 void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
@@ -659,7 +670,7 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
 {
-    target_ulong server = args[0];
+    target_ulong server = get_cpu_index_by_dt_id(args[0]);
     target_ulong mfrr = args[1];
 
     if (server >= spapr->icp->nr_servers) {
@@ -728,7 +739,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
     }
 
     nr = rtas_ld(args, 0);
-    server = rtas_ld(args, 1);
+    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
     priority = rtas_ld(args, 2);
 
     if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
10 changes: 5 additions & 5 deletions hw/intc/xics_kvm.c
@@ -65,7 +65,7 @@ static void icp_get_kvm_state(ICPState *ss)
     ret = kvm_vcpu_ioctl(ss->cs, KVM_GET_ONE_REG, &reg);
     if (ret != 0) {
         error_report("Unable to retrieve KVM interrupt controller state"
-                " for CPU %d: %s", ss->cs->cpu_index, strerror(errno));
+                " for CPU %ld: %s", kvm_arch_vcpu_id(ss->cs), strerror(errno));
         exit(1);
     }
 
@@ -97,7 +97,7 @@ static int icp_set_kvm_state(ICPState *ss, int version_id)
     ret = kvm_vcpu_ioctl(ss->cs, KVM_SET_ONE_REG, &reg);
     if (ret != 0) {
         error_report("Unable to restore KVM interrupt controller state (0x%"
-                PRIx64 ") for CPU %d: %s", state, ss->cs->cpu_index,
+                PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(ss->cs),
                 strerror(errno));
         return ret;
     }
@@ -325,15 +325,15 @@ static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
     struct kvm_enable_cap xics_enable_cap = {
         .cap = KVM_CAP_IRQ_XICS,
         .flags = 0,
-        .args = {icpkvm->kernel_xics_fd, cs->cpu_index, 0, 0},
+        .args = {icpkvm->kernel_xics_fd, kvm_arch_vcpu_id(cs), 0, 0},
     };
 
     ss->cs = cs;
 
     ret = kvm_vcpu_ioctl(ss->cs, KVM_ENABLE_CAP, &xics_enable_cap);
     if (ret < 0) {
-        error_report("Unable to connect CPU%d to kernel XICS: %s",
-                cs->cpu_index, strerror(errno));
+        error_report("Unable to connect CPU%ld to kernel XICS: %s",
+                kvm_arch_vcpu_id(cs), strerror(errno));
         exit(1);
     }
 }
7 changes: 5 additions & 2 deletions hw/ppc/e500.c
@@ -238,6 +238,7 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
        the first node as boot node and be happy */
     for (i = smp_cpus - 1; i >= 0; i--) {
         CPUState *cpu;
+        PowerPCCPU *pcpu;
         char cpu_name[128];
         uint64_t cpu_release_addr = MPC8544_SPIN_BASE + (i * 0x20);
 
@@ -246,14 +247,16 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
             continue;
         }
         env = cpu->env_ptr;
+        pcpu = POWERPC_CPU(cpu);
 
         snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x",
-                 cpu->cpu_index);
+                 ppc_get_vcpu_dt_id(pcpu));
         qemu_fdt_add_subnode(fdt, cpu_name);
         qemu_fdt_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq);
         qemu_fdt_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq);
         qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
-        qemu_fdt_setprop_cell(fdt, cpu_name, "reg", cpu->cpu_index);
+        qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
+                              ppc_get_vcpu_dt_id(pcpu));
         qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-line-size",
                               env->dcache_line_size);
         qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-line-size",
9 changes: 5 additions & 4 deletions hw/ppc/spapr.c
@@ -207,19 +207,20 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
 
     CPU_FOREACH(cpu) {
         DeviceClass *dc = DEVICE_GET_CLASS(cpu);
+        int index = ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
         uint32_t associativity[] = {cpu_to_be32(0x5),
                                     cpu_to_be32(0x0),
                                     cpu_to_be32(0x0),
                                     cpu_to_be32(0x0),
                                     cpu_to_be32(cpu->numa_node),
-                                    cpu_to_be32(cpu->cpu_index)};
+                                    cpu_to_be32(index)};
 
-        if ((cpu->cpu_index % smt) != 0) {
+        if ((index % smt) != 0) {
             continue;
         }
 
         snprintf(cpu_model, 32, "/cpus/%s@%x", dc->fw_name,
-                 cpu->cpu_index);
+                 index);
 
         offset = fdt_path_offset(fdt, cpu_model);
         if (offset < 0) {
@@ -368,7 +369,7 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
         CPUPPCState *env = &cpu->env;
         DeviceClass *dc = DEVICE_GET_CLASS(cs);
         PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
-        int index = cs->cpu_index;
+        int index = ppc_get_vcpu_dt_id(cpu);
         uint32_t servers_prop[smp_threads];
         uint32_t gservers_prop[smp_threads * 2];
         char *nodename;
6 changes: 3 additions & 3 deletions hw/ppc/spapr_hcall.c
@@ -482,13 +482,13 @@ static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
     target_ulong vpa = args[2];
     target_ulong ret = H_PARAMETER;
     CPUPPCState *tenv;
-    CPUState *tcpu;
+    PowerPCCPU *tcpu;
 
-    tcpu = qemu_get_cpu(procno);
+    tcpu = ppc_get_vcpu_by_dt_id(procno);
     if (!tcpu) {
         return H_PARAMETER;
     }
-    tenv = tcpu->env_ptr;
+    tenv = &tcpu->env;
 
     switch (flags) {
     case FLAGS_REGISTER_VPA:
14 changes: 7 additions & 7 deletions hw/ppc/spapr_rtas.c
@@ -131,17 +131,17 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
                                          uint32_t nret, target_ulong rets)
 {
     target_ulong id;
-    CPUState *cpu;
+    PowerPCCPU *cpu;
 
     if (nargs != 1 || nret != 2) {
         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
         return;
     }
 
     id = rtas_ld(args, 0);
-    cpu = qemu_get_cpu(id);
+    cpu = ppc_get_vcpu_by_dt_id(id);
     if (cpu != NULL) {
-        if (cpu->halted) {
+        if (CPU(cpu)->halted) {
             rtas_st(rets, 1, 0);
         } else {
             rtas_st(rets, 1, 2);
@@ -161,7 +161,7 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
                            uint32_t nret, target_ulong rets)
 {
     target_ulong id, start, r3;
-    CPUState *cs;
+    PowerPCCPU *cpu;
 
     if (nargs != 3 || nret != 1) {
         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -172,9 +172,9 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
     start = rtas_ld(args, 1);
     r3 = rtas_ld(args, 2);
 
-    cs = qemu_get_cpu(id);
-    if (cs != NULL) {
-        PowerPCCPU *cpu = POWERPC_CPU(cs);
+    cpu = ppc_get_vcpu_by_dt_id(id);
+    if (cpu != NULL) {
+        CPUState *cs = CPU(cpu);
         CPUPPCState *env = &cpu->env;
 
         if (!cs->halted) {
2 changes: 1 addition & 1 deletion target-ppc/kvm.c
@@ -402,7 +402,7 @@ static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
 
 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
 {
-    return cpu->cpu_index;
+    return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
 }
 
 int kvm_arch_init_vcpu(CPUState *cs)
1 change: 1 addition & 0 deletions target-ppc/translate_init.c
@@ -8457,6 +8457,7 @@ static void ppc_cpu_initfn(Object *obj)
 
     cs->env_ptr = env;
     cpu_exec_init(env);
+    cpu->cpu_dt_id = cs->cpu_index;
 
     env->msr_mask = pcc->msr_mask;
    env->mmu_model = pcc->mmu_model;
