
Commit b21b172

junjiemao1 authored and lijinxia committed
HV: io: refactoring vmexit handler on EPT violation
This is the counterpart to the PIO emulation side.

1. ept_violation_vmexit_handler (the entry point for handling vmexits on EPT
   violations): extract the MMIO address, size, direction and value (for
   writes only), fill in an I/O request, then invoke emulate_io to handle it
   and emulate_mmio_post for post-processing.

2. emulate_io: handle the given I/O request, either completed by a registered
   MMIO handler or sent to VHM.

3. emulate_mmio_post: update guest registers after the emulation is done.

v2 -> v3:
* Rename: emulate_mmio_by_handler -> hv_emulate_mmio.
* Inline the original hv_emulate_mmio.
* No longer check alignment. The handlers are responsible for handling
  unaligned accesses.

v1 -> v2:
* Rename: do_io -> emulate_io.

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 50e4bc1 commit b21b172
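
In outline, the refactored entry point reduces to the sketch below. This is a
condensed, illustrative rendering of the code in the ept.c diff that follows
(declarations, the exit-qualification decode and error paths are elided), not
an additional change:

int ept_violation_vmexit_handler(struct vcpu *vcpu)
{
	int status;
	struct io_request *io_req = &vcpu->req;

	/* 1. Decode the exit into an I/O request: type, address, size,
	 *    direction, and (for writes) the value being written. */
	io_req->type = REQ_MMIO;
	io_req->processed = REQ_STATE_PENDING;
	/* ... fill io_req->reqs.mmio from the exit qualification ... */

	/* 2. Emulate: registered MMIO handlers first, else send to VHM. */
	status = emulate_io(vcpu, io_req);

	/* 3. Post-process now if the request already completed; requests
	 *    sent to VHM stay PENDING until dm_emulate_mmio_post() runs on
	 *    vcpu resume. */
	if ((status == 0) && (io_req->processed != REQ_STATE_PENDING)) {
		status = emulate_mmio_post(vcpu, io_req);
	}

	return status;
}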

File tree: 3 files changed, +141 −128 lines

  hypervisor/arch/x86/ept.c
  hypervisor/arch/x86/io.c
  hypervisor/include/arch/x86/ioreq.h

hypervisor/arch/x86/ept.c

Lines changed: 27 additions & 124 deletions
@@ -11,7 +11,6 @@
 
 #define ACRN_DBG_EPT	6U
 
-
 static uint64_t find_next_table(uint32_t table_offset, void *table_base)
 {
 	uint64_t table_entry;
@@ -190,22 +189,6 @@ bool is_ept_supported(void)
 	return status;
 }
 
-static int
-hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req,
-		struct mem_io_node *mmio_handler)
-{
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-
-	if ((mmio_req->address % mmio_req->size) != 0UL) {
-		pr_err("access size not align with paddr");
-		return -EINVAL;
-	}
-
-	/* Handle this MMIO operation */
-	return mmio_handler->read_write(vcpu, io_req,
-		mmio_handler->handler_private_data);
-}
-
 int register_mmio_emulation_handler(struct vm *vm,
 	hv_mem_io_handler_t read_write, uint64_t start,
 	uint64_t end, void *handler_private_data)
@@ -275,82 +258,30 @@ void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
 	}
 }
 
-int dm_emulate_mmio_post(struct vcpu *vcpu)
-{
-	int ret = 0;
-	uint16_t cur = vcpu->vcpu_id;
-	struct io_request *io_req = &vcpu->req;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-	union vhm_request_buffer *req_buf;
-	struct vhm_request *vhm_req;
-
-	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
-	vhm_req = &req_buf->req_queue[cur];
-
-	mmio_req->value = vhm_req->reqs.mmio.value;
-	io_req->processed = vhm_req->processed;
-
-	/* VHM emulation data already copy to req, mark to free slot now */
-	vhm_req->valid = 0;
-
-	if (io_req->processed != REQ_STATE_SUCCESS) {
-		goto out;
-	}
-
-	if (mmio_req->direction == REQUEST_READ) {
-		/* Emulate instruction and update vcpu register set */
-		ret = emulate_instruction(vcpu);
-		if (ret != 0) {
-			goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
-static int
-dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual __unused)
-{
-	int status;
-	struct io_request *io_req = &vcpu->req;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-
-	if (mmio_req->direction == REQUEST_WRITE) {
-		status = emulate_instruction(vcpu);
-		if (status != 0) {
-			return status;
-		}
-		/* XXX: write access while EPT perm RX -> WP */
-		if ((exit_qual & 0x38UL) == 0x28UL) {
-			io_req->type = REQ_WP;
-		}
-	}
-
-	return 0;
-}
-
 int ept_violation_vmexit_handler(struct vcpu *vcpu)
 {
 	int status = -EINVAL, ret;
 	uint64_t exit_qual;
 	uint64_t gpa;
-	struct list_head *pos;
 	struct io_request *io_req = &vcpu->req;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-	struct mem_io_node *mmio_handler = NULL;
-
-	io_req->type = REQ_MMIO;
-	io_req->processed = REQ_STATE_PENDING;
 
 	/* Handle page fault from guest */
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
+	io_req->type = REQ_MMIO;
+	io_req->processed = REQ_STATE_PENDING;
+
 	/* Specify if read or write operation */
 	if ((exit_qual & 0x2UL) != 0UL) {
 		/* Write operation */
 		mmio_req->direction = REQUEST_WRITE;
 		mmio_req->value = 0UL;
+
+		/* XXX: write access while EPT perm RX -> WP */
+		if ((exit_qual & 0x38UL) == 0x28UL) {
+			io_req->type = REQ_WP;
+		}
 	} else {
 		/* Read operation */
 		mmio_req->direction = REQUEST_READ;
@@ -380,59 +311,31 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 		goto out;
 	}
 
-	list_for_each(pos, &vcpu->vm->mmio_list) {
-		mmio_handler = list_entry(pos, struct mem_io_node, list);
-		if (((mmio_req->address + mmio_req->size) <=
-			mmio_handler->range_start) ||
-			(mmio_req->address >= mmio_handler->range_end)) {
-			continue;
-		}
-		else if (!((mmio_req->address >= mmio_handler->range_start) &&
-			((mmio_req->address + mmio_req->size) <=
-			mmio_handler->range_end))) {
-			pr_fatal("Err MMIO, addr:0x%llx, size:%x",
-				mmio_req->address, mmio_req->size);
-			return -EIO;
-		}
 
-		if (mmio_req->direction == REQUEST_WRITE) {
-			if (emulate_instruction(vcpu) != 0) {
-				goto out;
-			}
-		}
+	/*
+	 * For MMIO write, ask DM to run MMIO emulation after
+	 * instruction emulation. For MMIO read, ask DM to run MMIO
+	 * emulation at first.
+	 */
 
-		/* Call generic memory emulation handler
-		 * For MMIO write, call hv_emulate_mmio after
-		 * instruction emulation. For MMIO read,
-		 * call hv_emulate_mmio at first.
-		 */
-		hv_emulate_mmio(vcpu, io_req, mmio_handler);
-		if (mmio_req->direction == REQUEST_READ) {
-			/* Emulate instruction and update vcpu register set */
-			if (emulate_instruction(vcpu) != 0) {
-				goto out;
-			}
+	/* Determine value being written. */
+	if (mmio_req->direction == REQUEST_WRITE) {
+		status = emulate_instruction(vcpu);
+		if (status != 0) {
+			goto out;
 		}
-
-		status = 0;
-		break;
 	}
 
-	if (status != 0) {
-		/*
-		 * No mmio handler from HV side, search from VHM in Dom0
-		 *
-		 * ACRN insert request to VHM and inject upcall
-		 * For MMIO write, ask DM to run MMIO emulation after
-		 * instruction emulation. For MMIO read, ask DM to run MMIO
-		 * emulation at first.
-		 */
-		if (dm_emulate_mmio_pre(vcpu, exit_qual) != 0) {
-			goto out;
-		}
+	status = emulate_io(vcpu, io_req);
 
-		status = acrn_insert_request_wait(vcpu, &vcpu->req);
-	}
+	/* io_req is hypervisor-private. For requests sent to VHM,
+	 * io_req->processed will be PENDING till dm_emulate_mmio_post() is
+	 * called on vcpu resume. */
+	if (status == 0) {
+		if (io_req->processed != REQ_STATE_PENDING) {
+			status = emulate_mmio_post(vcpu, io_req);
+		}
+	}
 
 out:
 	return status;
 }
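
A note on the exit-qualification tests above: per the Intel SDM's table of
exit qualifications for EPT violations, bit 1 is set when the faulting access
was a data write, and bits 5:3 report whether the guest page was readable,
writeable and executable. The mask names below are illustrative only; this
commit keeps the literal 0x2UL/0x38UL/0x28UL values:

#define EPT_VIOL_WRITE	(1UL << 1)	/* access was a data write */
#define EPT_VIOL_GPA_R	(1UL << 3)	/* page was readable */
#define EPT_VIOL_GPA_W	(1UL << 4)	/* page was writeable */
#define EPT_VIOL_GPA_X	(1UL << 5)	/* page was executable */

/* (exit_qual & 0x38UL) == 0x28UL <=> the page is readable and executable
 * but not writeable, i.e. a write hit a write-protected (RX) page, which
 * is reported as REQ_WP rather than plain REQ_MMIO. */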

hypervisor/arch/x86/io.c

Lines changed: 110 additions & 3 deletions
@@ -6,6 +6,9 @@
 
 #include <hypervisor.h>
 
+#include "guest/instr_emul_wrapper.h"
+#include "guest/instr_emul.h"
+
 /**
  * @pre io_req->type == REQ_PORTIO
  */
@@ -59,6 +62,51 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
 	return emulate_pio_post(vcpu, io_req);
 }
 
+/**
+ * @pre vcpu->req.type == REQ_MMIO
+ */
+int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
+{
+	int32_t ret;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+
+	if (io_req->processed == REQ_STATE_SUCCESS) {
+		if (mmio_req->direction == REQUEST_READ) {
+			/* Emulate instruction and update vcpu register set */
+			ret = emulate_instruction(vcpu);
+		} else {
+			ret = 0;
+		}
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * @pre vcpu->req.type == REQ_MMIO
+ */
+int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
+{
+	uint16_t cur = vcpu->vcpu_id;
+	struct io_request *io_req = &vcpu->req;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	union vhm_request_buffer *req_buf;
+	struct vhm_request *vhm_req;
+
+	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
+	vhm_req = &req_buf->req_queue[cur];
+
+	mmio_req->value = vhm_req->reqs.mmio.value;
+	io_req->processed = vhm_req->processed;
+
+	/* VHM emulation data already copy to req, mark to free slot now */
+	vhm_req->valid = 0;
+
+	return emulate_mmio_post(vcpu, io_req);
+}
+
 /**
  * Try handling the given request by any port I/O handler registered in the
  * hypervisor.
@@ -94,6 +142,7 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
 			pr_fatal("Err:IO, port 0x%04x, size=%hu spans devices",
 				port, size);
 			status = -EIO;
+			io_req->processed = REQ_STATE_FAILED;
 			break;
 		} else {
 			if (pio_req->direction == REQUEST_WRITE) {
@@ -120,12 +169,57 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
 	return status;
 }
 
+/**
+ * Use registered MMIO handlers on the given request if it falls in the range of
+ * any of them.
+ *
+ * @pre io_req->type == REQ_MMIO
+ *
+ * @return 0 - Successfully emulated by registered handlers.
+ * @return -ENODEV - No proper handler found.
+ * @return -EIO - The request spans multiple devices and cannot be emulated.
+ */
+static int32_t
+hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
+{
+	int status = -ENODEV;
+	uint64_t address, size;
+	struct list_head *pos;
+	struct mmio_request *mmio_req = &io_req->reqs.mmio;
+	struct mem_io_node *mmio_handler = NULL;
+
+	address = mmio_req->address;
+	size = mmio_req->size;
+
+	list_for_each(pos, &vcpu->vm->mmio_list) {
+		uint64_t base, end;
+
+		mmio_handler = list_entry(pos, struct mem_io_node, list);
+		base = mmio_handler->range_start;
+		end = mmio_handler->range_end;
+
+		if ((address + size <= base) || (address >= end)) {
+			continue;
+		} else if (!((address >= base) && (address + size <= end))) {
+			pr_fatal("Err MMIO, address:0x%llx, size:%x",
+				address, size);
+			io_req->processed = REQ_STATE_FAILED;
+			return -EIO;
+		} else {
+			/* Handle this MMIO operation */
+			status = mmio_handler->read_write(vcpu, io_req,
+				mmio_handler->handler_private_data);
+			break;
+		}
+	}
+
+	return status;
+}
+
 /**
  * Handle an I/O request by either invoking a hypervisor-internal handler or
  * deliver to VHM.
  *
- * @pre io_req->type == REQ_PORTIO
- *
  * @return 0 - Successfully emulated by registered handlers.
  * @return -EIO - The request spans multiple devices and cannot be emulated.
  * @return Negative on other errors during emulation.
@@ -135,7 +229,20 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
 {
 	int32_t status;
 
-	status = hv_emulate_pio(vcpu, io_req);
+	switch (io_req->type) {
+	case REQ_PORTIO:
+		status = hv_emulate_pio(vcpu, io_req);
+		break;
+	case REQ_MMIO:
+	case REQ_WP:
+		status = hv_emulate_mmio(vcpu, io_req);
+		break;
+	default:
+		/* Unknown I/O request type */
+		status = -EINVAL;
+		io_req->processed = REQ_STATE_FAILED;
+		break;
+	}
 
 	if (status == -ENODEV) {
 		/*
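
For reference, a minimal handler reached through hv_emulate_mmio's dispatch
might look like the sketch below. The foo_* names, the address and the backing
register are hypothetical; only the callback shape and the registration call
come from this commit. Completion-state bookkeeping is left to the framework
and omitted here, and note that since v3 of this series a handler must cope
with unaligned accesses itself:

#define FOO_MMIO_BASE	0xd0000000UL	/* hypothetical device register */
#define FOO_MMIO_SIZE	4UL

static uint32_t foo_reg;

static int foo_mmio_access(struct vcpu *vcpu, struct io_request *io_req,
		void *handler_private_data)
{
	struct mmio_request *mmio_req = &io_req->reqs.mmio;

	if (mmio_req->direction == REQUEST_READ) {
		/* hand the value back; it reaches the guest register via
		 * emulate_instruction() in emulate_mmio_post() */
		mmio_req->value = foo_reg;
	} else {
		/* commit the value decoded from the faulting instruction */
		foo_reg = (uint32_t)mmio_req->value;
	}

	return 0;
}

/* registered once per VM, e.g. during VM creation: */
static void foo_register(struct vm *vm)
{
	(void)register_mmio_emulation_handler(vm, foo_mmio_access,
		FOO_MMIO_BASE, FOO_MMIO_BASE + FOO_MMIO_SIZE, NULL);
}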

hypervisor/include/arch/x86/ioreq.h

Lines changed: 4 additions & 1 deletion
@@ -122,7 +122,10 @@ int register_mmio_emulation_handler(struct vm *vm,
 	uint64_t end, void *handler_private_data);
 void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
 	uint64_t end);
-int dm_emulate_mmio_post(struct vcpu *vcpu);
+int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
+int32_t dm_emulate_mmio_post(struct vcpu *vcpu);
+
+int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
 
 int32_t acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *req);
