Skip to content

Commit

Permalink
xen/ioreq: Move x86's io_completion/io_req fields to struct vcpu
Browse files Browse the repository at this point in the history
The IOREQ is a common feature now and these fields will be used
on Arm as is. Move them to common struct vcpu as a part of new
struct vcpu_io and drop the duplicated "io" prefixes. Also move
enum hvm_io_completion to xen/sched.h and remove "hvm" prefixes.

This patch completely removes the layering violation in the common code.

Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
CC: Julien Grall <julien.grall@arm.com>
[On Arm only]
Tested-by: Wei Chen <Wei.Chen@arm.com>
  • Loading branch information
Oleksandr Tyshchenko authored and Julien Grall committed Jan 29, 2021
1 parent e6ddac9 commit fe5df63
Show file tree
Hide file tree
Showing 11 changed files with 164 additions and 156 deletions.
210 changes: 105 additions & 105 deletions xen/arch/x86/hvm/emulate.c

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion xen/arch/x86/hvm/hvm.c
Original file line number Diff line number Diff line change
Expand Up @@ -3800,7 +3800,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
return;
}

switch ( hvm_emulate_one(&ctxt, HVMIO_no_completion) )
switch ( hvm_emulate_one(&ctxt, VIO_no_completion) )
{
case X86EMUL_UNHANDLEABLE:
case X86EMUL_UNIMPLEMENTED:
Expand Down
32 changes: 16 additions & 16 deletions xen/arch/x86/hvm/io.c
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)

hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());

switch ( rc = hvm_emulate_one(&ctxt, HVMIO_no_completion) )
switch ( rc = hvm_emulate_one(&ctxt, VIO_no_completion) )
{
case X86EMUL_UNHANDLEABLE:
hvm_dump_emulation_state(XENLOG_G_WARNING, descr, &ctxt, rc);
Expand All @@ -109,20 +109,20 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate, const char *descr)
bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec access)
{
struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;

vio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
? access : (struct npfec){};
vio->mmio_gla = gla & PAGE_MASK;
vio->mmio_gpfn = gpfn;
hvio->mmio_access = access.gla_valid &&
access.kind == npfec_kind_with_gla
? access : (struct npfec){};
hvio->mmio_gla = gla & PAGE_MASK;
hvio->mmio_gpfn = gpfn;
return handle_mmio();
}

bool handle_pio(uint16_t port, unsigned int size, int dir)
{
struct vcpu *curr = current;
struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
struct vcpu_io *vio = &curr->io;
unsigned int data;
int rc;

Expand All @@ -135,8 +135,8 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)

rc = hvmemul_do_pio_buffer(port, size, dir, &data);

if ( ioreq_needs_completion(&vio->io_req) )
vio->io_completion = HVMIO_pio_completion;
if ( ioreq_needs_completion(&vio->req) )
vio->completion = VIO_pio_completion;

switch ( rc )
{
Expand Down Expand Up @@ -175,7 +175,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
{
struct vcpu *curr = current;
const struct hvm_domain *hvm = &curr->domain->arch.hvm;
struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
struct g2m_ioport *g2m_ioport;
unsigned int start, end;

Expand All @@ -185,7 +185,7 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
end = start + g2m_ioport->np;
if ( (p->addr >= start) && (p->addr + p->size <= end) )
{
vio->g2m_ioport = g2m_ioport;
hvio->g2m_ioport = g2m_ioport;
return 1;
}
}
Expand All @@ -196,8 +196,8 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
static int g2m_portio_read(const struct hvm_io_handler *handler,
uint64_t addr, uint32_t size, uint64_t *data)
{
struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = hvio->g2m_ioport;
unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;

switch ( size )
Expand All @@ -221,8 +221,8 @@ static int g2m_portio_read(const struct hvm_io_handler *handler,
static int g2m_portio_write(const struct hvm_io_handler *handler,
uint64_t addr, uint32_t size, uint64_t data)
{
struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = vio->g2m_ioport;
struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
const struct g2m_ioport *g2m_ioport = hvio->g2m_ioport;
unsigned int mport = (addr - g2m_ioport->gport) + g2m_ioport->mport;

switch ( size )
Expand Down
6 changes: 3 additions & 3 deletions xen/arch/x86/hvm/ioreq.c
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,11 @@ bool arch_ioreq_complete_mmio(void)
return handle_mmio();
}

bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion)
bool arch_vcpu_ioreq_completion(enum vio_completion completion)
{
switch ( io_completion )
switch ( completion )
{
case HVMIO_realmode_completion:
case VIO_realmode_completion:
{
struct hvm_emulate_ctxt ctxt;

Expand Down
2 changes: 1 addition & 1 deletion xen/arch/x86/hvm/svm/nestedsvm.c
Original file line number Diff line number Diff line change
Expand Up @@ -1266,7 +1266,7 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
* Delay the injection because this would result in delivering
* an interrupt *within* the execution of an instruction.
*/
if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
if ( v->io.req.state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;

if ( !nv->nv_vmexit_pending && n2vmcb->exit_int_info.v )
Expand Down
8 changes: 4 additions & 4 deletions xen/arch/x86/hvm/vmx/realmode.c
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)

perfc_incr(realmode_emulations);

rc = hvm_emulate_one(hvmemul_ctxt, HVMIO_realmode_completion);
rc = hvm_emulate_one(hvmemul_ctxt, VIO_realmode_completion);

if ( rc == X86EMUL_UNHANDLEABLE )
{
Expand Down Expand Up @@ -153,7 +153,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
struct vcpu *curr = current;
struct hvm_emulate_ctxt hvmemul_ctxt;
struct segment_register *sreg;
struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
struct hvm_vcpu_io *hvio = &curr->arch.hvm.hvm_io;
unsigned long intr_info;
unsigned int emulations = 0;

Expand Down Expand Up @@ -188,7 +188,7 @@ void vmx_realmode(struct cpu_user_regs *regs)

vmx_realmode_emulate_one(&hvmemul_ctxt);

if ( vio->io_req.state != STATE_IOREQ_NONE || vio->mmio_retry )
if ( curr->io.req.state != STATE_IOREQ_NONE || hvio->mmio_retry )
break;

/* Stop emulating unless our segment state is not safe */
Expand All @@ -202,7 +202,7 @@ void vmx_realmode(struct cpu_user_regs *regs)
}

/* Need to emulate next time if we've started an IO operation */
if ( vio->io_req.state != STATE_IOREQ_NONE )
if ( curr->io.req.state != STATE_IOREQ_NONE )
curr->arch.hvm.vmx.vmx_emulate = 1;

if ( !curr->arch.hvm.vmx.vmx_emulate && !curr->arch.hvm.vmx.vmx_realmode )
Expand Down
26 changes: 13 additions & 13 deletions xen/common/ioreq.c
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
break;
}

p = &sv->vcpu->arch.hvm.hvm_io.io_req;
p = &sv->vcpu->io.req;
if ( ioreq_needs_completion(p) )
p->data = data;

Expand All @@ -171,10 +171,10 @@ static bool hvm_wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
bool handle_hvm_io_completion(struct vcpu *v)
{
struct domain *d = v->domain;
struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
struct vcpu_io *vio = &v->io;
struct ioreq_server *s;
struct ioreq_vcpu *sv;
enum hvm_io_completion io_completion;
enum vio_completion completion;

if ( has_vpci(d) && vpci_process_pending(v) )
{
Expand All @@ -186,29 +186,29 @@ bool handle_hvm_io_completion(struct vcpu *v)
if ( sv && !hvm_wait_for_io(sv, get_ioreq(s, v)) )
return false;

vio->io_req.state = ioreq_needs_completion(&vio->io_req) ?
vio->req.state = ioreq_needs_completion(&vio->req) ?
STATE_IORESP_READY : STATE_IOREQ_NONE;

msix_write_completion(v);
vcpu_end_shutdown_deferral(v);

io_completion = vio->io_completion;
vio->io_completion = HVMIO_no_completion;
completion = vio->completion;
vio->completion = VIO_no_completion;

switch ( io_completion )
switch ( completion )
{
case HVMIO_no_completion:
case VIO_no_completion:
break;

case HVMIO_mmio_completion:
case VIO_mmio_completion:
return arch_ioreq_complete_mmio();

case HVMIO_pio_completion:
return handle_pio(vio->io_req.addr, vio->io_req.size,
vio->io_req.dir);
case VIO_pio_completion:
return handle_pio(vio->req.addr, vio->req.size,
vio->req.dir);

default:
return arch_vcpu_ioreq_completion(io_completion);
return arch_vcpu_ioreq_completion(completion);
}

return true;
Expand Down
2 changes: 1 addition & 1 deletion xen/include/asm-x86/hvm/emulate.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ bool __nonnull(1, 2) hvm_emulate_one_insn(
const char *descr);
int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt,
enum hvm_io_completion completion);
enum vio_completion completion);
void hvm_emulate_one_vm_event(enum emul_kind kind,
unsigned int trapnr,
unsigned int errcode);
Expand Down
11 changes: 0 additions & 11 deletions xen/include/asm-x86/hvm/vcpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,6 @@
#include <asm/mtrr.h>
#include <public/hvm/ioreq.h>

enum hvm_io_completion {
HVMIO_no_completion,
HVMIO_mmio_completion,
HVMIO_pio_completion,
HVMIO_realmode_completion
};

struct hvm_vcpu_asid {
uint64_t generation;
uint32_t asid;
Expand All @@ -52,10 +45,6 @@ struct hvm_mmio_cache {
};

struct hvm_vcpu_io {
/* I/O request in flight to device model. */
enum hvm_io_completion io_completion;
ioreq_t io_req;

/*
* HVM emulation:
* Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
Expand Down
2 changes: 1 addition & 1 deletion xen/include/xen/ioreq.h
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ void hvm_ioreq_init(struct domain *d);
int ioreq_server_dm_op(struct xen_dm_op *op, struct domain *d, bool *const_op);

bool arch_ioreq_complete_mmio(void);
bool arch_vcpu_ioreq_completion(enum hvm_io_completion io_completion);
bool arch_vcpu_ioreq_completion(enum vio_completion completion);
int arch_ioreq_server_map_pages(struct ioreq_server *s);
void arch_ioreq_server_unmap_pages(struct ioreq_server *s);
void arch_ioreq_server_enable(struct ioreq_server *s);
Expand Down
19 changes: 19 additions & 0 deletions xen/include/xen/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,21 @@ void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */

struct waitqueue_vcpu;

enum vio_completion {
VIO_no_completion,
VIO_mmio_completion,
VIO_pio_completion,
#ifdef CONFIG_X86
VIO_realmode_completion,
#endif
};

struct vcpu_io {
/* I/O request in flight to device model. */
enum vio_completion completion;
ioreq_t req;
};

struct vcpu
{
int vcpu_id;
Expand Down Expand Up @@ -258,6 +273,10 @@ struct vcpu
struct vpci_vcpu vpci;

struct arch_vcpu arch;

#ifdef CONFIG_IOREQ_SERVER
struct vcpu_io io;
#endif
};

struct sched_unit {
Expand Down

0 comments on commit fe5df63

Please sign in to comment.