Skip to content

Commit 6e96243

Browse files
junjiemao1 and lijinxia
authored and committed
HV: io: drop REQ_STATE_FAILED
Now the DM has adopted the new VHM request state transitions and REQ_STATE_FAILED is obsolete since neither VHM nor kernel mediators will set the state to FAILED. This patch drops the definition to REQ_STATE_FAILED in the hypervisor, makes ''processed'' unsigned to make the compiler happy about typing and simplifies error handling in the following ways. * (dm_)emulate_(pio|mmio)_post no longer returns an error code, by introducing a constraint that these functions must be called after an I/O request completes (which is the case in the current design) and assuming handlers/VHM/DM will always give a value for reads (typically all 1's if the requested address is invalid). * emulate_io() now returns a positive value IOREQ_PENDING to indicate that the request is sent to VHM. This mitigates a potential race between dm_emulate_pio() and pio_instr_vmexit_handler() which can cause emulate_pio_post() being called twice for the same request. * Remove the ''processed'' member in io_request. Previously this mirrors the state of the VHM request which terminates at either COMPLETE or FAILED. After the FAILED state is removed, the terminal state will always be constantly COMPLETE. Thus the mirrored ''processed'' member is no longer useful. Note that emulate_instruction() will always succeed after a reshuffle, and this patch takes that assumption in advance. This does not hurt as that returned value is not currently handled. This patch makes it explicit that I/O emulation is not expected to fail. One issue remains, though, which occurs when a non-aligned cross-boundary access happens. Currently the hypervisor, VHM and DM adopts different policy: * Hypervisor: inject #GP if it detects that the access crossed boundary * VHM: deliver to DM if the access does not complete falls in the range of a client * DM: a handler covering part of the to-be-accessed region is picked and assertion failure can be triggered. 
A high-level design covering all these components (in addition to instruction emulation) is needed for this. Thus this patch does not yet cover the issue. Tracked-On: #875 Signed-off-by: Junjie Mao <junjie.mao@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent ca83c09 commit 6e96243

File tree

7 files changed

+49
-78
lines changed

7 files changed

+49
-78
lines changed

hypervisor/arch/x86/ept.c

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,6 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
156156
exit_qual = vcpu->arch_vcpu.exit_qualification;
157157

158158
io_req->type = REQ_MMIO;
159-
io_req->processed = REQ_STATE_PENDING;
160159

161160
/* Specify if read or write operation */
162161
if ((exit_qual & 0x2UL) != 0UL) {
@@ -214,14 +213,11 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
214213

215214
status = emulate_io(vcpu, io_req);
216215

217-
/* io_req is hypervisor-private. For requests sent to VHM,
218-
* io_req->processed will be PENDING till dm_emulate_mmio_post() is
219-
* called on vcpu resume. */
220216
if (status == 0) {
221-
if (io_req->processed != REQ_STATE_PENDING) {
222-
status = emulate_mmio_post(vcpu, io_req);
223-
}
224-
}
217+
emulate_mmio_post(vcpu, io_req);
218+
} else if (status == IOREQ_PENDING) {
219+
status = 0;
220+
}
225221

226222
return status;
227223

hypervisor/arch/x86/guest/vlapic.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2097,7 +2097,6 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
20972097
/* Can never happen due to the range of mmio_req->direction. */
20982098
}
20992099

2100-
io_req->processed = REQ_STATE_COMPLETE;
21012100
return ret;
21022101
}
21032102

hypervisor/arch/x86/io.c

Lines changed: 35 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -16,35 +16,33 @@ static void complete_ioreq(struct vhm_request *vhm_req)
1616

1717
/**
1818
* @pre io_req->type == REQ_PORTIO
19+
*
20+
* @remark This function must be called when \p io_req is completed, after
21+
* either a previous call to emulate_io() returning 0 or the corresponding VHM
22+
* request having transferred to the COMPLETE state.
1923
*/
20-
static int32_t
24+
static void
2125
emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
2226
{
23-
int32_t status;
2427
struct pio_request *pio_req = &io_req->reqs.pio;
2528
uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);
2629

27-
if (io_req->processed == REQ_STATE_COMPLETE) {
28-
if (pio_req->direction == REQUEST_READ) {
29-
uint64_t value = (uint64_t)pio_req->value;
30-
int32_t context_idx = vcpu->arch_vcpu.cur_context;
31-
uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
30+
if (pio_req->direction == REQUEST_READ) {
31+
uint64_t value = (uint64_t)pio_req->value;
32+
uint64_t rax = vcpu_get_gpreg(vcpu, CPU_REG_RAX);
3233

33-
rax = ((rax) & ~mask) | (value & mask);
34-
vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
35-
}
36-
status = 0;
37-
} else {
38-
status = -1;
34+
rax = ((rax) & ~mask) | (value & mask);
35+
vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax);
3936
}
40-
41-
return status;
4237
}
4338

4439
/**
4540
* @pre vcpu->req.type == REQ_PORTIO
41+
*
42+
* @remark This function must be called after the VHM request corresponding to
43+
* \p vcpu being transferred to the COMPLETE state.
4644
*/
47-
int32_t dm_emulate_pio_post(struct vcpu *vcpu)
45+
void dm_emulate_pio_post(struct vcpu *vcpu)
4846
{
4947
uint16_t cur = vcpu->vcpu_id;
5048
union vhm_request_buffer *req_buf = NULL;
@@ -60,35 +58,33 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
6058
/* VHM emulation data already copy to req, mark to free slot now */
6159
complete_ioreq(vhm_req);
6260

63-
return emulate_pio_post(vcpu, io_req);
61+
emulate_pio_post(vcpu, io_req);
6462
}
6563

6664
/**
6765
* @pre vcpu->req.type == REQ_MMIO
66+
*
67+
* @remark This function must be called when \p io_req is completed, after
68+
* either a previous call to emulate_io() returning 0 or the corresponding VHM
69+
* request having transferred to the COMPLETE state.
6870
*/
69-
int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
71+
void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
7072
{
71-
int32_t ret;
7273
struct mmio_request *mmio_req = &io_req->reqs.mmio;
7374

74-
if (io_req->processed == REQ_STATE_COMPLETE) {
75-
if (mmio_req->direction == REQUEST_READ) {
76-
/* Emulate instruction and update vcpu register set */
77-
ret = emulate_instruction(vcpu);
78-
} else {
79-
ret = 0;
80-
}
81-
} else {
82-
ret = 0;
75+
if (mmio_req->direction == REQUEST_READ) {
76+
/* Emulate instruction and update vcpu register set */
77+
emulate_instruction(vcpu);
8378
}
84-
85-
return ret;
8679
}
8780

8881
/**
8982
* @pre vcpu->req.type == REQ_MMIO
83+
*
84+
* @remark This function must be called after the VHM request corresponding to
85+
* \p vcpu being transferred to the COMPLETE state.
9086
*/
91-
int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
87+
void dm_emulate_mmio_post(struct vcpu *vcpu)
9288
{
9389
uint16_t cur = vcpu->vcpu_id;
9490
struct io_request *io_req = &vcpu->req;
@@ -104,7 +100,7 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
104100
/* VHM emulation data already copy to req, mark to free slot now */
105101
complete_ioreq(vhm_req);
106102

107-
return emulate_mmio_post(vcpu, io_req);
103+
emulate_mmio_post(vcpu, io_req);
108104
}
109105

110106
#ifdef CONFIG_PARTITION_MODE
@@ -123,15 +119,12 @@ void emulate_io_post(struct vcpu *vcpu)
123119
{
124120
union vhm_request_buffer *req_buf;
125121
struct vhm_request *vhm_req;
126-
struct io_request *io_req = &vcpu->req;
127122

128123
req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
129124
vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
130-
io_req->processed = atomic_load32(&vhm_req->processed);
131125

132126
if ((vhm_req->valid == 0) ||
133-
((io_req->processed != REQ_STATE_COMPLETE) &&
134-
(io_req->processed != REQ_STATE_FAILED))) {
127+
(atomic_load32(&vhm_req->processed) != REQ_STATE_COMPLETE)) {
135128
return;
136129
}
137130

@@ -205,7 +198,6 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
205198
pr_fatal("Err:IO, port 0x%04x, size=%hu spans devices",
206199
port, size);
207200
status = -EIO;
208-
io_req->processed = REQ_STATE_FAILED;
209201
break;
210202
} else {
211203
if (pio_req->direction == REQUEST_WRITE) {
@@ -221,9 +213,6 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
221213
pr_dbg("IO read on port %04x, data %08x",
222214
port, pio_req->value);
223215
}
224-
/* TODO: failures in the handlers should be reflected
225-
* here. */
226-
io_req->processed = REQ_STATE_COMPLETE;
227216
status = 0;
228217
break;
229218
}
@@ -266,7 +255,6 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
266255
} else if (!((address >= base) && (address + size <= end))) {
267256
pr_fatal("Err MMIO, address:0x%llx, size:%x",
268257
address, size);
269-
io_req->processed = REQ_STATE_FAILED;
270258
return -EIO;
271259
} else {
272260
/* Handle this MMIO operation */
@@ -284,6 +272,7 @@ hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req)
284272
* deliver to VHM.
285273
*
286274
* @return 0 - Successfully emulated by registered handlers.
275+
* @return IOREQ_PENDING - The I/O request is delivered to VHM.
287276
* @return -EIO - The request spans multiple devices and cannot be emulated.
288277
* @return Negative on other errors during emulation.
289278
*/
@@ -303,7 +292,6 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
303292
default:
304293
/* Unknown I/O request type */
305294
status = -EINVAL;
306-
io_req->processed = REQ_STATE_FAILED;
307295
break;
308296
}
309297

@@ -329,6 +317,8 @@ emulate_io(struct vcpu *vcpu, struct io_request *io_req)
329317
pr_fatal("Err:IO %s access to port 0x%04lx, size=%lu",
330318
(pio_req->direction != REQUEST_READ) ? "read" : "write",
331319
pio_req->address, pio_req->size);
320+
} else {
321+
status = IOREQ_PENDING;
332322
}
333323
#endif
334324
}
@@ -347,7 +337,6 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
347337
exit_qual = vcpu->arch_vcpu.exit_qualification;
348338

349339
io_req->type = REQ_PORTIO;
350-
io_req->processed = REQ_STATE_PENDING;
351340
pio_req->size = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1UL;
352341
pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
353342
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
@@ -365,13 +354,10 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
365354

366355
status = emulate_io(vcpu, io_req);
367356

368-
/* io_req is hypervisor-private. For requests sent to VHM,
369-
* io_req->processed will be PENDING till dm_emulate_pio_post() is
370-
* called on vcpu resume. */
371357
if (status == 0) {
372-
if (io_req->processed != REQ_STATE_PENDING) {
373-
status = emulate_pio_post(vcpu, io_req);
374-
}
358+
emulate_pio_post(vcpu, io_req);
359+
} else if (status == IOREQ_PENDING) {
360+
status = 0;
375361
}
376362

377363
return status;

hypervisor/common/io_request.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -139,16 +139,16 @@ static void local_get_req_info_(struct vhm_request *req, int *id, char *type,
139139

140140
switch (req->processed) {
141141
case REQ_STATE_COMPLETE:
142-
(void)strcpy_s(state, 16U, "SUCCESS");
142+
(void)strcpy_s(state, 16U, "COMPLETE");
143143
break;
144144
case REQ_STATE_PENDING:
145145
(void)strcpy_s(state, 16U, "PENDING");
146146
break;
147147
case REQ_STATE_PROCESSING:
148148
(void)strcpy_s(state, 16U, "PROCESS");
149149
break;
150-
case REQ_STATE_FAILED:
151-
(void)strcpy_s(state, 16U, "FAILED");
150+
case REQ_STATE_FREE:
151+
(void)strcpy_s(state, 16U, "FREE");
152152
break;
153153
default:
154154
(void)strcpy_s(state, 16U, "UNKNOWN");

hypervisor/dm/vioapic.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -597,7 +597,6 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
597597
ret = -EINVAL;
598598
}
599599

600-
io_req->processed = REQ_STATE_COMPLETE;
601600
return ret;
602601
}
603602

hypervisor/include/arch/x86/ioreq.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,15 @@
1010
#include <types.h>
1111
#include <acrn_common.h>
1212

13+
/* The return value of emulate_io() indicating the I/O request is delivered to
14+
* VHM but not finished yet. */
15+
#define IOREQ_PENDING 1
16+
1317
/* Internal representation of a I/O request. */
1418
struct io_request {
1519
/** Type of the request (PIO, MMIO, etc). Refer to vhm_request. */
1620
uint32_t type;
1721

18-
/** Status of request handling. Written by request handlers and read by
19-
* the I/O emulation framework. Refer to vhm_request. */
20-
int32_t processed;
21-
2222
/** Details of this request in the same format as vhm_request. */
2323
union vhm_io_request reqs;
2424
};
@@ -122,8 +122,8 @@ int register_mmio_emulation_handler(struct vm *vm,
122122
uint64_t end, void *handler_private_data);
123123
void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
124124
uint64_t end);
125-
int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
126-
int32_t dm_emulate_mmio_post(struct vcpu *vcpu);
125+
void emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req);
126+
void dm_emulate_mmio_post(struct vcpu *vcpu);
127127

128128
int32_t emulate_io(struct vcpu *vcpu, struct io_request *io_req);
129129
void emulate_io_post(struct vcpu *vcpu);

hypervisor/include/public/acrn_common.h

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@
3030
#define REQ_STATE_PENDING 0
3131
#define REQ_STATE_COMPLETE 1
3232
#define REQ_STATE_PROCESSING 2
33-
#define REQ_STATE_FAILED -1
3433

3534
#define REQ_PORTIO 0U
3635
#define REQ_MMIO 1U
@@ -97,8 +96,6 @@ union vhm_io_request {
9796
* The state transitions of a VHM request are:
9897
*
9998
* FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
100-
* \ /
101-
* +--> FAILED -+
10299
*
103100
* When a request is in COMPLETE or FREE state, the request is owned by the
104101
* hypervisor. SOS (VHM or DM) shall not read or write the internals of the
@@ -154,12 +151,6 @@ union vhm_io_request {
154151
*
155152
* 2. Due to similar reasons, setting state to COMPLETE is the last operation
156153
* of request handling in VHM or clients in SOS.
157-
*
158-
* The state FAILED is an obsolete state to indicate that the I/O request cannot
159-
* be handled. In such cases the mediators and DM should switch the state to
160-
* COMPLETE with the value set to all 1s for read, and skip the request for
161-
* writes. This state WILL BE REMOVED after the mediators and DM are updated to
162-
* follow this rule.
163154
*/
164155
struct vhm_request {
165156
/**
@@ -208,7 +199,7 @@ struct vhm_request {
208199
*
209200
* Byte offset: 136.
210201
*/
211-
int32_t processed;
202+
uint32_t processed;
212203
} __aligned(256);
213204

214205
union vhm_request_buffer {

0 commit comments

Comments
 (0)