Browse files

[CORE] Update vmm_vcpu_irq to make it more generic.

With this patch we can run an unmodified PB-A8 basic test as a guest on vexpress-a15-ve. No more code patching is required for cortex-a15-ve.

Signed-off-by: Anup Patel <anup@brainfault.org>
  • Loading branch information...
1 parent e42ce73 commit f71762020f3f37011212db36f24a7fc12aeed602 @avpatel avpatel committed Apr 19, 2012
View
2 arch/arm/board/vexpress-a15/openconf.cfg
@@ -28,7 +28,7 @@ config CONFIG_BOARD
config CONFIG_VEXPRESS
bool
select CONFIG_DTC
- select CONFIG_CPATCH
+ select CONFIG_CPATCH if CONFIG_CORTEX_A15
select CONFIG_LIBFDT
select CONFIG_SERIAL
select CONFIG_SERIAL_PL01X
View
2 arch/arm/configs/vexpress-a15-ve-defconfig
@@ -151,6 +151,6 @@ CONFIG_EMU_RTC_PL031=y
# Tool Options
#
CONFIG_DTC=y
-CONFIG_CPATCH=y
+# CONFIG_CPATCH is not set
# CONFIG_BBFLASH is not set
# CONFIG_KALLSYMS_GENERATOR is not set
View
74 arch/arm/cpu/arm32/cpu_vcpu_emulate_arm.c
@@ -379,7 +379,7 @@ static int arm_hypercall_rfe(u32 id, u32 subid, u32 inst,
u32 data;
register int rc;
register u32 cond, Rn, P, U, W;
- register u32 cpsr, address;
+ register u32 address;
arm_funcstat_start(vcpu, ARM_FUNCSTAT_RFE);
cond = ARM_INST_DECODE(inst, ARM_INST_COND_MASK, ARM_INST_COND_SHIFT);
Rn = ARM_INST_BITS(inst,
@@ -393,11 +393,27 @@ static int arm_hypercall_rfe(u32 id, u32 subid, u32 inst,
P = ARM_INST_BIT(inst, ARM_HYPERCALL_RFE_P_START);
U = ARM_INST_BIT(inst, ARM_HYPERCALL_RFE_U_START);
W = ARM_INST_BIT(inst, ARM_HYPERCALL_RFE_W_START);
- cpsr = arm_priv(vcpu)->cpsr & CPSR_MODE_MASK;
- if (cpsr == CPSR_MODE_USER) {
+ switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
+ case CPSR_MODE_FIQ:
+ vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_FIQ);
+ break;
+ case CPSR_MODE_IRQ:
+ vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_IRQ);
+ break;
+ case CPSR_MODE_SUPERVISOR:
+ vmm_vcpu_irq_deassert(vcpu, CPU_SOFT_IRQ);
+ break;
+ case CPSR_MODE_ABORT:
+ vmm_vcpu_irq_deassert(vcpu, CPU_PREFETCH_ABORT_IRQ);
+ vmm_vcpu_irq_deassert(vcpu, CPU_DATA_ABORT_IRQ);
+ break;
+ case CPSR_MODE_UNDEFINED:
+ vmm_vcpu_irq_deassert(vcpu, CPU_UNDEF_INST_IRQ);
+ break;
+ default:
arm_unpredictable(regs, vcpu);
return VMM_EFAIL;
- }
+ };
address = cpu_vcpu_reg_read(vcpu, regs, Rn);
address = (U == 1) ? address : (address - 8);
address = (P == U) ? (address + 4) : address;
@@ -418,8 +434,6 @@ static int arm_hypercall_rfe(u32 id, u32 subid, u32 inst,
address = (U == 1) ? (address + 8) : (address - 8);
cpu_vcpu_reg_write(vcpu, regs, Rn, address);
}
- /* Steps unique to exception return */
- vmm_vcpu_irq_deassert(vcpu);
} else {
regs->pc += 4;
}
@@ -519,12 +533,27 @@ int arm_hypercall_ldm_ue(u32 id, u32 inst,
return VMM_EFAIL;
}
if (arm_condition_passed(cond, regs)) {
- cpsr = arm_priv(vcpu)->cpsr & CPSR_MODE_MASK;
- if ((cpsr == CPSR_MODE_USER) ||
- (cpsr == CPSR_MODE_SYSTEM)) {
+ switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
+ case CPSR_MODE_FIQ:
+ vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_FIQ);
+ break;
+ case CPSR_MODE_IRQ:
+ vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_IRQ);
+ break;
+ case CPSR_MODE_SUPERVISOR:
+ vmm_vcpu_irq_deassert(vcpu, CPU_SOFT_IRQ);
+ break;
+ case CPSR_MODE_ABORT:
+ vmm_vcpu_irq_deassert(vcpu, CPU_PREFETCH_ABORT_IRQ);
+ vmm_vcpu_irq_deassert(vcpu, CPU_DATA_ABORT_IRQ);
+ break;
+ case CPSR_MODE_UNDEFINED:
+ vmm_vcpu_irq_deassert(vcpu, CPU_UNDEF_INST_IRQ);
+ break;
+ default:
arm_unpredictable(regs, vcpu);
return VMM_EFAIL;
- }
+ };
mask = 0x1;
length = 4;
for (i = 0; i < 15; i++) {
@@ -576,8 +605,6 @@ int arm_hypercall_ldm_ue(u32 id, u32 inst,
cpsr = cpu_vcpu_spsr_retrieve(vcpu);
cpu_vcpu_cpsr_update(vcpu, regs, cpsr, CPSR_ALLBITS_MASK);
regs->pc = data;
- /* Steps unique to exception return */
- vmm_vcpu_irq_deassert(vcpu);
} else {
regs->pc += 4;
}
@@ -735,6 +762,27 @@ int arm_hypercall_subs_rel(u32 id, u32 inst,
ARM_HYPERCALL_SUBS_REL_RN_START);
register_form = (id == ARM_HYPERCALL_SUBS_REL_ID0) ? TRUE : FALSE;
if (arm_condition_passed(cond, regs)) {
+ switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
+ case CPSR_MODE_FIQ:
+ vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_FIQ);
+ break;
+ case CPSR_MODE_IRQ:
+ vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_IRQ);
+ break;
+ case CPSR_MODE_SUPERVISOR:
+ vmm_vcpu_irq_deassert(vcpu, CPU_SOFT_IRQ);
+ break;
+ case CPSR_MODE_ABORT:
+ vmm_vcpu_irq_deassert(vcpu, CPU_PREFETCH_ABORT_IRQ);
+ vmm_vcpu_irq_deassert(vcpu, CPU_DATA_ABORT_IRQ);
+ break;
+ case CPSR_MODE_UNDEFINED:
+ vmm_vcpu_irq_deassert(vcpu, CPU_UNDEF_INST_IRQ);
+ break;
+ default:
+ arm_unpredictable(regs, vcpu);
+ return VMM_EFAIL;
+ };
if (register_form) {
imm5 = ARM_INST_BITS(inst,
ARM_HYPERCALL_SUBS_REL_IMM5_END,
@@ -823,8 +871,6 @@ int arm_hypercall_subs_rel(u32 id, u32 inst,
spsr = cpu_vcpu_spsr_retrieve(vcpu);
cpu_vcpu_cpsr_update(vcpu, regs, spsr, CPSR_ALLBITS_MASK);
regs->pc = result;
- /* Steps unique to exception return */
- vmm_vcpu_irq_deassert(vcpu);
} else {
regs->pc += 4;
}
View
13 arch/arm/cpu/arm32/cpu_vcpu_irq.c
@@ -68,6 +68,12 @@ u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no)
return ret;
}
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ /* We don't implement this. */
+ return VMM_OK;
+}
+
int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
arch_regs_t * regs,
u32 irq_no, u32 reason)
@@ -158,3 +164,10 @@ int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
return VMM_OK;
}
+
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ /* We don't implement this. */
+ return VMM_OK;
+}
+
View
2 arch/arm/cpu/arm32/include/arch_cpu.h
@@ -69,9 +69,11 @@ void arch_cpu_wait_for_irq(void);
/** VCPU Interrupt functions required by VMM core */
u32 arch_vcpu_irq_count(struct vmm_vcpu * vcpu);
u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no);
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
arch_regs_t * regs,
u32 irq_no, u32 reason);
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
/** Timer functions required by VMM core */
int arch_cpu_clockevent_start(u64 tick_nsecs);
View
21 arch/arm/cpu/arm32ve/cpu_vcpu_cp15.c
@@ -48,29 +48,23 @@ static int cpu_vcpu_cp15_stage2_map(struct vmm_vcpu * vcpu,
int rc;
u32 reg_flags = 0x0;
struct cpu_page pg;
+ physical_size_t availsz;
vmm_memset(&pg, 0, sizeof(pg));
pg.ia = fipa & TTBL_L3_MAP_MASK;
-
- if ((pg.ia & TTBL_L2_MAP_MASK) == pg.ia) {
- pg.sz = TTBL_L2_BLOCK_SIZE;
- } else {
- pg.sz = TTBL_L3_BLOCK_SIZE;
- }
+ pg.sz = TTBL_L3_BLOCK_SIZE;
rc = vmm_guest_physical_map(vcpu->guest, pg.ia, pg.sz,
- &pg.oa, &pg.sz, &reg_flags);
+ &pg.oa, &availsz, &reg_flags);
if (rc) {
return rc;
}
- if (pg.sz < TTBL_L3_BLOCK_SIZE) {
+ if (availsz < TTBL_L3_BLOCK_SIZE) {
return VMM_EFAIL;
}
- pg.sz = cpu_mmu_best_page_size(pg.ia, pg.oa, pg.sz);
-
if (reg_flags & VMM_REGION_VIRTUAL) {
pg.af = 0;
pg.ap = TTBL_HAP_NOACCESS;
@@ -92,7 +86,12 @@ static int cpu_vcpu_cp15_stage2_map(struct vmm_vcpu * vcpu,
* }
*/
- return cpu_mmu_map_page(arm_priv(vcpu)->cp15.ttbl, &pg);
+ rc = cpu_mmu_map_page(arm_priv(vcpu)->cp15.ttbl, &pg);
+ if (rc) {
+ return rc;
+ }
+
+ return VMM_OK;
}
int cpu_vcpu_cp15_inst_abort(struct vmm_vcpu * vcpu,
View
30 arch/arm/cpu/arm32ve/cpu_vcpu_emulate.c
@@ -142,39 +142,11 @@ int cpu_vcpu_emulate_mrrc_cp14(struct vmm_vcpu * vcpu,
return VMM_EFAIL;
}
-static int cpu_vcpu_hvc_movs_pc_lr(struct vmm_vcpu * vcpu,
- arch_regs_t * regs,
- u32 il)
-{
- u32 hcr, spsr;
-
- /* Clear VI, VF from HCR */
- hcr = read_hcr();
- hcr &= ~(HCR_VI_MASK | HCR_VF_MASK);
- arm_priv(vcpu)->hcr = hcr;
- write_hcr(hcr);
-
- /* Update PC */
- regs->pc = cpu_vcpu_reg_read(vcpu, regs, 14);
-
- /* Update CPSR */
- spsr = cpu_vcpu_spsr_retrieve(vcpu, regs->cpsr & CPSR_MODE_MASK);
- regs->cpsr = spsr;
-
- return VMM_OK;
-}
-
+/* TODO: To be implemented later */
int cpu_vcpu_emulate_hvc(struct vmm_vcpu * vcpu,
arch_regs_t * regs,
u32 il, u32 iss)
{
- switch(iss & 0xFFFF) {
- case 0:
- return cpu_vcpu_hvc_movs_pc_lr(vcpu, regs, il);
- default:
- break;
- };
-
return VMM_EFAIL;
}
View
49 arch/arm/cpu/arm32ve/cpu_vcpu_irq.c
@@ -68,30 +68,54 @@ u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no)
return ret;
}
-int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
- arch_regs_t * regs,
- u32 irq_no, u32 reason)
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
{
- u32 hcr = read_hcr();
+ u32 hcr = arm_priv(vcpu)->hcr;
switch(irq_no) {
case CPU_DATA_ABORT_IRQ:
hcr |= HCR_VA_MASK;
/* VA bit is auto-cleared */
break;
case CPU_EXTERNAL_IRQ:
- if (regs->cpsr & CPSR_IRQ_DISABLED) {
- return VMM_EFAIL;
- }
hcr |= HCR_VI_MASK;
- /* VI bit will be cleared by "HVC #0" instruction */
+ /* VI bit will be cleared on deassertion */
break;
case CPU_EXTERNAL_FIQ:
- if (regs->cpsr & CPSR_FIQ_DISABLED) {
- return VMM_EFAIL;
- }
hcr |= HCR_VF_MASK;
- /* VF bit will be cleared by "HVC #0" instruction */
+ /* VF bit will be cleared on deassertion */
+ break;
+ default:
+ return VMM_EFAIL;
+ break;
+ };
+
+ arm_priv(vcpu)->hcr = hcr;
+
+ return VMM_OK;
+}
+
+
+int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
+ arch_regs_t * regs,
+ u32 irq_no, u32 reason)
+{
+	/* Update HCR in HW */
+ write_hcr(arm_priv(vcpu)->hcr);
+
+ return VMM_OK;
+}
+
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ u32 hcr = read_hcr();
+
+ switch(irq_no) {
+ case CPU_EXTERNAL_IRQ:
+ hcr &= ~HCR_VI_MASK;
+ break;
+ case CPU_EXTERNAL_FIQ:
+ hcr &= ~HCR_VF_MASK;
break;
default:
return VMM_EFAIL;
@@ -103,3 +127,4 @@ int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
return VMM_OK;
}
+
View
158 arch/arm/cpu/arm32ve/elf2cpatch.py
@@ -1,158 +0,0 @@
-#!/usr/bin/python
-#/**
-# Copyright (c) 2012 Anup Patel.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-#
-# @file elf2cpatch.py
-# @author Anup Patel (anup@brainfault.org)
-# @brief Script to generate cpatch script from guest OS ELF
-# */
-
-# We need to trap the guest OS on interrupt return so we replace
-# exception return instructions with HVC calls:
-#
-# MOVS PC, LR => HVC #0
-#
-
-import os
-import sys
-from optparse import OptionParser
-
-usage = "Usage: %prog [options] <section1> [<section2> <section3> ...]"
-parser = OptionParser(usage=usage)
-parser.add_option("-f", "--file", dest="filename",
- help="Input ARM ELF32 file", metavar="FILE")
-parser.add_option("-q", "--quiet",
- action="store_false", dest="verbose", default=True,
- help="Don't print status messages to stdout")
-
-(options, secs) = parser.parse_args()
-
-if not options.filename:
- print "Error: No input ARM ELF32 file"
- sys.exit()
-
-if len(secs)==0:
- print "Error: No sections to scan"
- sys.exit()
-
-dumpcmd = os.environ.get("CROSS_COMPILE") + "objdump -d " + options.filename
-
-# Initialize data structures
-lines = [];
-lsyms = [];
-lsecs = [];
-vlnums = [];
-vsymdec = [];
-addr2lnum = {};
-sym2base = {};
-
-# Populate data structures
-sec = ""
-sec_valid = 0
-lnum = 0
-sym = ""
-base = 0
-p = os.popen(dumpcmd,"r")
-while 1:
- l = p.readline()
- if not l: break
- l = l.strip(" ");
- l = l.replace("\n","");
- l = l.replace("\t"," ");
- while l.count(" ")>0:
- l = l.replace(" "," ")
- w = l.split(" ");
- if len(w)>3 and w[0]=="Disassembly" and w[1]=="of" and w[2]=="section":
- w[3] = w[3].replace(":", "")
- sec = ""
- sec_valid = 0
- for i, s in enumerate(secs):
- if w[3]==s:
- sec = s
- sec_valid = 1
- break
- elif sec_valid==1:
- if len(w)>2:
- addr = base | int(w[0].replace(":",""), 16)
- lines.append(l)
- lsecs.append(sec)
- lsyms.append(sym)
- addr2lnum[addr] = lnum
- vlnums.append(True)
- vsymdec.append(False)
- lnum += 1
- elif len(w)==2:
- if not(w[1].startswith("<") and w[1].endswith(">:")):
- continue
- base = int(w[0].replace(":",""), 16)
- w[1] = w[1].replace("<","")
- w[1] = w[1].replace(">:","")
- sym = w[1]
- lines.append(l)
- sym2base[sym] = base
- lsecs.append(sec)
- lsyms.append(sym)
- vlnums.append(False)
- vsymdec.append(True)
- lnum += 1
-
-
-# MOVS PC, LR (SUBS PC, LR and related instructions)
-# Syntax:
-# MOVS<c> pc, lr, #<const>
-# Fields:
-# cond = bits[31:28]
-# opcode = bits[24:21]
-# Rn = bits[19:16]
-# imm5 = bits[11:7]
-# type = bits[6:5]
-# Rm = bits[3:0]
-# Hypercall Fields:
-# inst_cond[31:28] = cond
-# inst_op[27:24] = 0x1
-# inst_op[23:20] = 0x4
-# inst_fields[19:8] = 0x0
-# inst_op[7:4] = 0x7
-# inst_fields[3:0] = 0x0
-def convert_movs_pc_lr_inst(hxstr):
- hx = int(hxstr, 16)
- cond = (hx >> 28) & 0xF
- rethx = 0x01400070
- rethx = rethx | (cond << 28)
- return rethx
-
-psec = ""
-for ln, l in enumerate(lines):
- if vlnums[ln]:
- sec = lsecs[ln];
- if sec!=psec:
- print "section," + sec
- psec = sec
- w = l.split(" ")
- if len(w)<3:
- continue
- w[0] = w[0].replace(":", "")
- addr = int(w[0], 16)
- if (len(w)>1):
- if (w[1]=="Address"):
- continue
- if len(w)>=5:
- if (w[2]=="movs" and w[3]=="pc," and w[4]=="lr"):
- print "\t#", w[2], w[3], w[4]
- print "\twrite32,0x%x,0x%08x" % (addr, convert_movs_pc_lr_inst(w[1]))
-
View
2 arch/arm/cpu/arm32ve/include/arch_cpu.h
@@ -69,9 +69,11 @@ void arch_cpu_wait_for_irq(void);
/** VCPU Interrupt functions required by VMM core */
u32 arch_vcpu_irq_count(struct vmm_vcpu * vcpu);
u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no);
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
arch_regs_t * regs,
u32 irq_no, u32 reason);
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
/** Timer functions required by VMM core */
int arch_cpu_clockevent_start(u64 tick_nsecs);
View
14 arch/mips/cpu/mips32r2/cpu_vcpu_irq.c
@@ -36,9 +36,23 @@ u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no)
return 1;
}
+/* FIXME: */
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ return VMM_OK;
+}
+
+/* FIXME: */
int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
arch_regs_t * regs,
u32 irq_no, u32 reason)
{
return VMM_OK;
}
+
+/* FIXME: */
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ return VMM_OK;
+}
+
View
8 arch/mips/cpu/mips32r2/include/arch_cpu.h
@@ -65,12 +65,14 @@ void arch_cpu_irq_restore(irq_flags_t flags);
void arch_cpu_wait_for_irq(void);
/** VCPU Interrupt functions required by VMM core */
-s32 arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
+u32 arch_vcpu_irq_count(struct vmm_vcpu * vcpu);
+u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no);
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
+int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
arch_regs_t *regs,
u32 interrupt_no,
u32 reason);
-u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no);
-u32 arch_vcpu_irq_count(struct vmm_vcpu * vcpu);
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
/** Timer functions required by VMM core */
int arch_cpu_clockevent_start(u64 tick_nsecs);
View
14 arch/x86/cpu/x86_64/cpu_vcpu_irq.c
@@ -36,9 +36,23 @@ u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no)
return 1;
}
+/* FIXME: */
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ return VMM_OK;
+}
+
+/* FIXME: */
int arch_vcpu_irq_execute(struct vmm_vcpu * vcpu,
arch_regs_t * regs,
u32 irq_no, u32 reason)
{
return VMM_OK;
}
+
+/* FIXME: */
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason)
+{
+ return VMM_OK;
+}
+
View
8 arch/x86/cpu/x86_64/include/arch_cpu.h
@@ -65,12 +65,14 @@ void arch_cpu_irq_restore(irq_flags_t flags);
void arch_cpu_wait_for_irq(void);
/** VCPU Interrupt functions required by VMM core */
-s32 arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
+u32 arch_vcpu_irq_count(struct vmm_vcpu * vcpu);
+u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no);
+int arch_vcpu_irq_deassert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
+int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
arch_regs_t *regs,
u32 interrupt_no,
u32 reason);
-u32 arch_vcpu_irq_priority(struct vmm_vcpu * vcpu, u32 irq_no);
-u32 arch_vcpu_irq_count(struct vmm_vcpu * vcpu);
+int arch_vcpu_irq_assert(struct vmm_vcpu * vcpu, u32 irq_no, u32 reason);
/** Timer functions required by VMM core */
int arch_cpu_clockevent_start(u64 tick_nsecs);
View
1 core/include/vmm_manager.h
@@ -72,6 +72,7 @@ struct vmm_guest_aspace {
struct vmm_vcpu_irqs {
bool *assert;
+ bool *execute;
u32 *reason;
u32 depth;
u64 assert_count;
View
2 core/include/vmm_vcpu_irq.h
@@ -33,7 +33,7 @@ void vmm_vcpu_irq_process(arch_regs_t * regs);
void vmm_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason);
/** Deassert active irq of given vcpu */
-void vmm_vcpu_irq_deassert(struct vmm_vcpu *vcpu);
+void vmm_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no);
/** Wait for irq on given vcpu */
int vmm_vcpu_irq_wait(struct vmm_vcpu *vcpu);
View
30 core/vmm_vcpu_irq.c
@@ -61,8 +61,8 @@ void vmm_vcpu_irq_process(arch_regs_t * regs)
/* If irq number found then execute it */
if (irq_no != -1) {
if (arch_vcpu_irq_execute(vcpu, regs, irq_no, irq_reas) == VMM_OK) {
- vcpu->irqs.reason[irq_no] = 0x0;
vcpu->irqs.assert[irq_no] = FALSE;
+ vcpu->irqs.execute[irq_no] = TRUE;
vcpu->irqs.execute_count++;
}
}
@@ -81,9 +81,11 @@ void vmm_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
/* Assert the irq */
if (!vcpu->irqs.assert[irq_no]) {
- vcpu->irqs.reason[irq_no] = reason;
- vcpu->irqs.assert[irq_no] = TRUE;
- vcpu->irqs.assert_count++;
+ if (arch_vcpu_irq_assert(vcpu, irq_no, reason) == VMM_OK) {
+ vcpu->irqs.reason[irq_no] = reason;
+ vcpu->irqs.assert[irq_no] = TRUE;
+ vcpu->irqs.assert_count++;
+ }
}
/* If vcpu was waiting for irq then resume it. */
@@ -93,15 +95,27 @@ void vmm_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
}
}
-void vmm_vcpu_irq_deassert(struct vmm_vcpu *vcpu)
+void vmm_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no)
{
+ u32 reason;
+
/* For non-normal vcpu dont do anything */
if (!vcpu || !vcpu->is_normal) {
return;
}
- /* Increment deassert count */
- vcpu->irqs.deassert_count++;
+ /* Deassert the irq */
+ if (vcpu->irqs.execute[irq_no]) {
+ reason = vcpu->irqs.reason[irq_no];
+ if (arch_vcpu_irq_deassert(vcpu, irq_no, reason) == VMM_OK) {
+ vcpu->irqs.deassert_count++;
+ }
+ }
+
+ /* Ensure irq is not asserted and not executing */
+ vcpu->irqs.reason[irq_no] = 0x0;
+ vcpu->irqs.assert[irq_no] = FALSE;
+ vcpu->irqs.execute[irq_no] = FALSE;
}
int vmm_vcpu_irq_wait(struct vmm_vcpu *vcpu)
@@ -146,6 +160,7 @@ int vmm_vcpu_irq_init(struct vmm_vcpu *vcpu)
/* Allocate memory for arrays */
vcpu->irqs.assert = vmm_malloc(sizeof(bool) * irq_count);
+ vcpu->irqs.execute = vmm_malloc(sizeof(bool) * irq_count);
vcpu->irqs.reason = vmm_malloc(sizeof(u32) * irq_count);
}
@@ -161,6 +176,7 @@ int vmm_vcpu_irq_init(struct vmm_vcpu *vcpu)
for (ite = 0; ite < irq_count; ite++) {
vcpu->irqs.reason[ite] = 0;
vcpu->irqs.assert[ite] = FALSE;
+ vcpu->irqs.execute[ite] = FALSE;
}
/* Clear wait for irq flag */

0 comments on commit f717620

Please sign in to comment.