
Commit 17771c0

junjiemao1 authored and lijinxia committed
HV: io: refine state transitions of VHM requests
Instead of using two members for maintaining the state of a VHM request, this patch replaces the transitions with a single state. Basically the lifecycle of a VHM request shall be:

    FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...

The structure header of vhm_request has more details on the transitions and the access limitations under different states. Also drop the set-but-unused member vcpu.ioreq_pending.

For backward compatibility, the obsolete 'valid' member is still kept and maintained until SOS and DM adapt to the new state transitions.

v2 -> v3:

* Use complete_ioreq to mark an I/O request finished in dm_emulate_(pio|mmio)_post.

Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 941eb9d commit 17771c0
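With a single atomic state field, each ownership hand-off in the lifecycle above reduces to one atomic operation on the slot's processed member. As a rough illustration of the consumer side (not part of this patch: claim_request is a hypothetical helper, and C11 atomics stand in for the SOS-side primitives), a VHM-style scanner could claim a PENDING slot like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define REQ_STATE_PENDING    0
#define REQ_STATE_PROCESSING 2

/* Hypothetical consumer-side claim: atomically move a slot from
 * PENDING to PROCESSING so that no other scanner picks it up. */
static bool claim_request(_Atomic int32_t *processed)
{
	int32_t expected = REQ_STATE_PENDING;

	/* acquire pairs with the producer's release store of PENDING,
	 * so the payload written before PENDING is visible here */
	return atomic_compare_exchange_strong_explicit(
		processed, &expected, REQ_STATE_PROCESSING,
		memory_order_acquire, memory_order_relaxed);
}

A failed compare-exchange just means the slot is not pending or another scanner got there first, so the caller moves on to the next slot.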

File tree

8 files changed (+144, -51 lines)

hypervisor/arch/x86/guest/vcpu.c

Lines changed: 0 additions & 2 deletions
@@ -117,7 +117,6 @@ int create_vcpu(uint16_t pcpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->ioreq_pending = 0;
 	vcpu->arch_vcpu.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;
 	vcpu->state = VCPU_INIT;
@@ -277,7 +276,6 @@ void reset_vcpu(struct vcpu *vcpu)
 	vcpu->launched = false;
 	vcpu->paused_cnt = 0U;
 	vcpu->running = 0;
-	vcpu->ioreq_pending = 0;
 	vcpu->arch_vcpu.nr_sipi = 0;
 	vcpu->pending_pre_work = 0U;

hypervisor/arch/x86/guest/vioapic.c

Lines changed: 1 addition & 2 deletions
@@ -613,12 +613,10 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 			gpa,
 			&data);
 		mmio->value = (uint64_t)data;
-		io_req->processed = REQ_STATE_SUCCESS;
 	} else if (mmio->direction == REQUEST_WRITE) {
 		vioapic_mmio_write(vm,
 			gpa,
 			data);
-		io_req->processed = REQ_STATE_SUCCESS;
 	} else {
 		/* Can never happen due to the range of direction. */
 	}
@@ -627,6 +625,7 @@ int vioapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 		ret = -EINVAL;
 	}

+	io_req->processed = REQ_STATE_COMPLETE;
 	return ret;
 }

hypervisor/arch/x86/guest/vlapic.c

Lines changed: 1 addition & 2 deletions
@@ -2069,17 +2069,16 @@ int vlapic_mmio_access_handler(struct vcpu *vcpu, struct io_request *io_req,
 			gpa,
 			&mmio_req->value,
 			mmio_req->size);
-		io_req->processed = REQ_STATE_SUCCESS;
 	} else if (mmio_req->direction == REQUEST_WRITE) {
 		ret = vlapic_write_mmio_reg(vcpu,
 			gpa,
 			mmio_req->value,
 			mmio_req->size);
-		io_req->processed = REQ_STATE_SUCCESS;
 	} else {
 		/* Can never happen due to the range of mmio_req->direction. */
 	}

+	io_req->processed = REQ_STATE_COMPLETE;
 	return ret;
 }

hypervisor/arch/x86/io.c

Lines changed: 17 additions & 24 deletions
@@ -9,6 +9,12 @@
 #include "guest/instr_emul_wrapper.h"
 #include "guest/instr_emul.h"

+static void complete_ioreq(struct vhm_request *vhm_req)
+{
+	vhm_req->valid = 0;
+	atomic_store32(&vhm_req->processed, REQ_STATE_FREE);
+}
+
 /**
  * @pre io_req->type == REQ_PORTIO
  */
@@ -19,7 +25,7 @@ emulate_pio_post(struct vcpu *vcpu, struct io_request *io_req)
 	struct pio_request *pio_req = &io_req->reqs.pio;
 	uint64_t mask = 0xFFFFFFFFUL >> (32UL - 8UL * pio_req->size);

-	if (io_req->processed == REQ_STATE_SUCCESS) {
+	if (io_req->processed == REQ_STATE_COMPLETE) {
 		if (pio_req->direction == REQUEST_READ) {
 			uint64_t value = (uint64_t)pio_req->value;
 			int32_t context_idx = vcpu->arch_vcpu.cur_context;
@@ -53,11 +59,10 @@ int32_t dm_emulate_pio_post(struct vcpu *vcpu)
 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
 	vhm_req = &req_buf->req_queue[cur];

-	io_req->processed = vhm_req->processed;
 	pio_req->value = vhm_req->reqs.pio.value;

 	/* VHM emulation data already copy to req, mark to free slot now */
-	vhm_req->valid = 0;
+	complete_ioreq(vhm_req);

 	return emulate_pio_post(vcpu, io_req);
 }
@@ -70,7 +75,7 @@ int32_t emulate_mmio_post(struct vcpu *vcpu, struct io_request *io_req)
 	int32_t ret;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;

-	if (io_req->processed == REQ_STATE_SUCCESS) {
+	if (io_req->processed == REQ_STATE_COMPLETE) {
 		if (mmio_req->direction == REQUEST_READ) {
 			/* Emulate instruction and update vcpu register set */
 			ret = emulate_instruction(vcpu);
@@ -99,38 +104,26 @@ int32_t dm_emulate_mmio_post(struct vcpu *vcpu)
 	vhm_req = &req_buf->req_queue[cur];

 	mmio_req->value = vhm_req->reqs.mmio.value;
-	io_req->processed = vhm_req->processed;

 	/* VHM emulation data already copy to req, mark to free slot now */
-	vhm_req->valid = 0;
+	complete_ioreq(vhm_req);

 	return emulate_mmio_post(vcpu, io_req);
 }

-static void complete_ioreq(struct vcpu *vcpu)
-{
-	union vhm_request_buffer *req_buf;
-	struct vhm_request *vhm_req;
-
-	req_buf = (union vhm_request_buffer *)
-		vcpu->vm->sw.io_shared_page;
-	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
-
-	vhm_req->valid = 0;
-	atomic_store32(&vcpu->ioreq_pending, 0U);
-}
-
 void emulate_io_post(struct vcpu *vcpu)
 {
 	union vhm_request_buffer *req_buf;
 	struct vhm_request *vhm_req;
+	struct io_request *io_req = &vcpu->req;

 	req_buf = (union vhm_request_buffer *)vcpu->vm->sw.io_shared_page;
 	vhm_req = &req_buf->req_queue[vcpu->vcpu_id];
+	io_req->processed = atomic_load32(&vhm_req->processed);

 	if ((vhm_req->valid == 0) ||
-		((vhm_req->processed != REQ_STATE_SUCCESS) &&
-		 (vhm_req->processed != REQ_STATE_FAILED))) {
+		((io_req->processed != REQ_STATE_COMPLETE) &&
+		 (io_req->processed != REQ_STATE_FAILED))) {
 		return;
 	}
@@ -139,7 +132,7 @@ void emulate_io_post(struct vcpu *vcpu)
 	 * mark ioreq done and don't resume vcpu.
 	 */
 	if (vcpu->state == VCPU_ZOMBIE) {
-		complete_ioreq(vcpu);
+		complete_ioreq(vhm_req);
 		return;
 	}
@@ -162,7 +155,7 @@ void emulate_io_post(struct vcpu *vcpu)
 	default:
 		/* REQ_WP can only be triggered on writes which do not need
 		 * post-work. Just mark the ioreq done. */
-		complete_ioreq(vcpu);
+		complete_ioreq(vhm_req);
 		break;
 	}
@@ -222,7 +215,7 @@ hv_emulate_pio(struct vcpu *vcpu, struct io_request *io_req)
 	}
 	/* TODO: failures in the handlers should be reflected
 	 * here. */
-	io_req->processed = REQ_STATE_SUCCESS;
+	io_req->processed = REQ_STATE_COMPLETE;
 	status = 0;
 	break;
 }
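Note that the refactored complete_ioreq now takes the shared-page slot directly instead of a vcpu: the DM post-work paths, the zombie-vCPU path, and the REQ_WP default path all release a slot the same way, clearing the compatibility valid flag and then atomically storing FREE as the final access to the slot.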

hypervisor/common/hypercall.c

Lines changed: 7 additions & 0 deletions
@@ -334,6 +334,8 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)
 	uint64_t hpa = 0UL;
 	struct acrn_set_ioreq_buffer iobuf;
 	struct vm *target_vm = get_vm_from_vmid(vmid);
+	union vhm_request_buffer *req_buf;
+	uint16_t i;

 	if (target_vm == NULL) {
 		return -1;
@@ -358,6 +360,11 @@ int32_t hcall_set_ioreq_buffer(struct vm *vm, uint16_t vmid, uint64_t param)

 	target_vm->sw.io_shared_page = HPA2HVA(hpa);

+	req_buf = target_vm->sw.io_shared_page;
+	for (i = 0U; i < VHM_REQUEST_MAX; i++) {
+		atomic_store32(&req_buf->req_queue[i].processed, REQ_STATE_FREE);
+	}
+
 	return ret;
 }
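Initializing every slot to FREE when the shared page is registered is what makes the new lifecycle safe to bootstrap: acrn_insert_request_wait now asserts that a slot is FREE before filling it, so stale contents of a freshly shared page must not be mistaken for an in-flight state.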

hypervisor/common/io_request.c

Lines changed: 9 additions & 7 deletions
@@ -72,28 +72,30 @@ acrn_insert_request_wait(struct vcpu *vcpu, struct io_request *io_req)
 	}

 	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
-
-	/* ACRN insert request to VHM and inject upcall */
 	cur = vcpu->vcpu_id;
 	vhm_req = &req_buf->req_queue[cur];
+
+	ASSERT(atomic_load32(&vhm_req->processed) == REQ_STATE_FREE,
+		"VHM request buffer is busy");
+
+	/* ACRN insert request to VHM and inject upcall */
 	vhm_req->type = io_req->type;
-	vhm_req->processed = io_req->processed;
 	(void)memcpy_s(&vhm_req->reqs, sizeof(union vhm_io_request),
 		&io_req->reqs, sizeof(union vhm_io_request));

 	/* pause vcpu, wait for VHM to handle the MMIO request.
 	 * TODO: when pause_vcpu changed to switch vcpu out directlly, we
 	 * should fix the race issue between req.valid = true and vcpu pause
 	 */
-	atomic_store32(&vcpu->ioreq_pending, 1U);
 	pause_vcpu(vcpu, VCPU_PAUSED);

-	/* Must clear the signal before we mark req valid
-	 * Once we mark to valid, VHM may process req and signal us
+	/* Must clear the signal before we mark req as pending
+	 * Once we mark it pending, VHM may process req and signal us
 	 * before we perform upcall.
 	 * because VHM can work in pulling mode without wait for upcall
 	 */
 	vhm_req->valid = 1;
+	atomic_store32(&vhm_req->processed, REQ_STATE_PENDING);

 	acrn_print_request(vcpu->vcpu_id, vhm_req);

@@ -140,7 +142,7 @@ static void _get_req_info_(struct vhm_request *req, int *id, char *type,
 	}

 	switch (req->processed) {
-	case REQ_STATE_SUCCESS:
+	case REQ_STATE_COMPLETE:
 		(void)strcpy_s(state, 16U, "SUCCESS");
 		break;
 	case REQ_STATE_PENDING:
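The ordering in acrn_insert_request_wait is the crux of the new scheme: the payload copy and the compatibility valid flag must be visible before the PENDING store, because VHM may poll the state without waiting for the upcall. A minimal C11 sketch of that producer side (illustrative only: req_slot and issue_request are hypothetical, on the assumption that the hypervisor's atomic_store32 behaves as a release store here):

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define REQ_STATE_PENDING 0

/* Trimmed-down request slot, for illustration only. */
struct req_slot {
	uint32_t type;
	uint8_t payload[64];
	int32_t valid;                /* obsolete compatibility flag */
	_Atomic int32_t processed;
};

/* Hypothetical issue path: fill in everything first, then publish the
 * slot with a single release store of PENDING. After that store the
 * producer must not touch the slot until it comes back to COMPLETE. */
static void issue_request(struct req_slot *slot, uint32_t type,
			  const void *payload, size_t len)
{
	slot->type = type;
	memcpy(slot->payload, payload, len);
	slot->valid = 1;              /* kept for backward compatibility */

	atomic_store_explicit(&slot->processed, REQ_STATE_PENDING,
			      memory_order_release);
}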

hypervisor/include/arch/x86/guest/vcpu.h

Lines changed: 0 additions & 1 deletion
@@ -248,7 +248,6 @@ struct vcpu {
 	bool launched; /* Whether the vcpu is launched on target pcpu */
 	uint32_t paused_cnt; /* how many times vcpu is paused */
 	uint32_t running; /* vcpu is picked up and run? */
-	uint32_t ioreq_pending; /* ioreq is ongoing or not? */

 	struct io_request req; /* used by io/ept emulation */
hypervisor/include/public/acrn_common.h

Lines changed: 109 additions & 13 deletions
@@ -26,8 +26,9 @@
  */
 #define VHM_REQUEST_MAX		16U

+#define REQ_STATE_FREE		3
 #define REQ_STATE_PENDING	0
-#define REQ_STATE_SUCCESS	1
+#define REQ_STATE_COMPLETE	1
 #define REQ_STATE_PROCESSING	2
 #define REQ_STATE_FAILED	-1

@@ -90,27 +91,122 @@ union vhm_io_request {
 	int64_t reserved1[8];
 };

-/* vhm_request are 256Bytes aligned */
+/**
+ * @brief 256-byte VHM requests
+ *
+ * The state transitions of a VHM request are:
+ *
+ *    FREE -> PENDING -> PROCESSING -> COMPLETE -> FREE -> ...
+ *                           \                       /
+ *                            +------> FAILED ------+
+ *
+ * When a request is in COMPLETE or FREE state, the request is owned by the
+ * hypervisor. SOS (VHM or DM) shall not read or write the internals of the
+ * request except the state.
+ *
+ * When a request is in PENDING or PROCESSING state, the request is owned by
+ * SOS. The hypervisor shall not read or write the request other than the
+ * state.
+ *
+ * Based on the rules above, a typical VHM request lifecycle should look like
+ * the following.
+ *
+ * (assume the initial state is FREE)
+ *
+ *     SOS vCPU 0                SOS vCPU x                UOS vCPU y
+ *
+ *                                              hypervisor:
+ *                                                  fill in type, addr, etc.
+ *                                                  pause UOS vcpu y
+ *                                                  set state to PENDING (a)
+ *                                                  fire upcall to SOS vCPU 0
+ *
+ *     VHM:
+ *         scan for pending requests
+ *         set state to PROCESSING (b)
+ *         assign requests to clients (c)
+ *
+ *                               client:
+ *                                   scan for assigned requests
+ *                                   handle the requests (d)
+ *                                   set state to COMPLETE
+ *                                   notify the hypervisor
+ *
+ *                               hypervisor:
+ *                                   resume UOS vcpu y (e)
+ *
+ *                                              hypervisor:
+ *                                                  post-work (f)
+ *                                                  set state to FREE
+ *
+ * Note that the following shall hold.
+ *
+ *   1. (a) happens before (b)
+ *   2. (c) happens before (d)
+ *   3. (e) happens before (f)
+ *   4. One vCPU cannot trigger another I/O request before the previous one
+ *      has completed (i.e. the state switched to FREE)
+ *
+ * Accesses to the state of a vhm_request shall be atomic and proper barriers
+ * are needed to ensure that:
+ *
+ *   1. Setting state to PENDING is the last operation when issuing a request
+ *      in the hypervisor, as the hypervisor shall not access the request any
+ *      more.
+ *
+ *   2. Due to similar reasons, setting state to COMPLETE is the last
+ *      operation of request handling in VHM or clients in SOS.
+ *
+ * The state FAILED is an obsolete state to indicate that the I/O request
+ * cannot be handled. In such cases the mediators and DM should switch the
+ * state to COMPLETE with the value set to all 1s for read, and skip the
+ * request for writes. This state WILL BE REMOVED after the mediators and DM
+ * are updated to follow this rule.
+ */
 struct vhm_request {
-	/* offset: 0bytes - 63bytes */
+	/**
+	 * Type of this request.
+	 *
+	 * Byte offset: 0.
+	 */
 	uint32_t type;
-	int32_t reserved0[15];

-	/* offset: 64bytes-127bytes */
+	/**
+	 * Reserved.
+	 *
+	 * Byte offset: 4.
+	 */
+	uint32_t reserved0[15];
+
+	/**
+	 * Details about this request. For REQ_PORTIO, this has type
+	 * pio_request. For REQ_MMIO and REQ_WP, this has type mmio_request.
+	 * For REQ_PCICFG, this has type pci_request.
+	 *
+	 * Byte offset: 64.
+	 */
 	union vhm_io_request reqs;

-	/* True: valid req which need VHM to process.
-	 * ACRN write, VHM read only
-	 **/
+	/**
+	 * Whether this request is valid for processing. ACRN write, VHM read
	 * only.
+	 *
+	 * Warning: this field is obsolete and will be removed soon.
+	 *
+	 * Byte offset: 128.
+	 */
 	int32_t valid;

-	/* the client which is distributed to handle this request */
+	/**
+	 * The client which is distributed to handle this request. Accessed by
+	 * VHM only.
+	 *
+	 * Byte offset: 132.
+	 */
 	int32_t client;

-	/* 1: VHM had processed and success
-	 * 0: VHM had not yet processed
-	 * -1: VHM failed to process. Invalid request
-	 * VHM write, ACRN read only
+	/**
+	 * The status of this request, taking REQ_STATE_xxx as values.
+	 *
+	 * Byte offset: 136.
 	 */
 	int32_t processed;
 } __aligned(256);
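To make rule 2 of the doc comment concrete, here is a hedged sketch of what a SOS-side client's handling loop might look like. It is not code from this commit: vhm_slot, handle_one_request, notify_hypervisor, and client_poll are all hypothetical, and C11 atomics stand in for whatever primitives VHM actually uses.

#include <stdatomic.h>
#include <stdint.h>

#define VHM_REQUEST_MAX      16U
#define REQ_STATE_COMPLETE   1
#define REQ_STATE_PROCESSING 2

/* Minimal view of a request slot for this sketch. */
struct vhm_slot {
	int32_t client;
	_Atomic int32_t processed;
	/* ... request payload ... */
};

/* Hypothetical: emulate the device access described by the slot. */
static void handle_one_request(struct vhm_slot *slot) { (void)slot; }
/* Hypothetical: kick the hypervisor so it can resume the UOS vCPU. */
static void notify_hypervisor(uint16_t slot_idx) { (void)slot_idx; }

static void client_poll(struct vhm_slot queue[], int32_t my_client_id)
{
	for (uint16_t i = 0U; i < VHM_REQUEST_MAX; i++) {
		struct vhm_slot *slot = &queue[i];

		if (slot->client != my_client_id)
			continue;
		if (atomic_load_explicit(&slot->processed,
				memory_order_acquire) != REQ_STATE_PROCESSING)
			continue;

		handle_one_request(slot);

		/* Setting COMPLETE must be the last access to the slot:
		 * after this release store the hypervisor owns it again. */
		atomic_store_explicit(&slot->processed, REQ_STATE_COMPLETE,
				      memory_order_release);
		notify_hypervisor(i);
	}
}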
